Line data Source code
1 : // Copyright 2023 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package pebble
6 :
7 : import (
8 : "context"
9 : "fmt"
10 :
11 : "github.com/cockroachdb/errors"
12 : "github.com/cockroachdb/pebble/internal/base"
13 : "github.com/cockroachdb/pebble/internal/invariants"
14 : "github.com/cockroachdb/pebble/internal/keyspan"
15 : "github.com/cockroachdb/pebble/internal/manifest"
16 : "github.com/cockroachdb/pebble/objstorage"
17 : )
18 :
19 : const (
20 : // In skip-shared iteration mode, keys in levels sharedLevelsStart and greater
21 : // (i.e. lower in the LSM) are skipped.
22 : sharedLevelsStart = 5
23 : )
24 :
25 : // ErrInvalidSkipSharedIteration is returned by ScanInternal if it was called
26 : // with a shared file visitor function, and a file in a shareable level (i.e.
27 : // level >= sharedLevelsStart) was found not to be in shared storage according
28 : // to objstorage.Provider, or not shareable for another reason, such as
29 : // containing keys newer than the snapshot sequence number.
30 : var ErrInvalidSkipSharedIteration = errors.New("pebble: cannot use skip-shared iteration due to non-shareable files in lower levels")
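Editor's sketch (not part of the original file): scanInternalImpl wraps this sentinel via errors.Wrapf, so callers should detect it with errors.Is rather than a direct equality check. The helper and the fallback function below are hypothetical caller-side names.

import (
	"github.com/cockroachdb/errors"
	"github.com/cockroachdb/pebble"
)

// maybeFallBack retries without skip-shared iteration when the scan reports
// non-shareable files in the lower levels. retryWithoutSkipShared is a
// hypothetical caller-provided function.
func maybeFallBack(err error, retryWithoutSkipShared func() error) error {
	if errors.Is(err, pebble.ErrInvalidSkipSharedIteration) {
		return retryWithoutSkipShared()
	}
	return err
}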
31 :
32 : // SharedSSTMeta represents an sstable on shared storage that can be ingested
33 : // by another pebble instance. This struct must contain all fields that are
34 : // required for a Pebble instance to ingest a foreign sstable on shared storage,
35 : // including constructing any relevant objstorage.Provider / remoteobjcat.Catalog
36 : // data structures, as well as creating virtual FileMetadatas.
37 : //
38 : // Note that the Pebble instance creating and returning a SharedSSTMeta might
39 : // not be the one that created the underlying sstable on shared storage to begin
40 : // with; it's possible for a Pebble instance to reshare an sstable that was
41 : // shared to it.
42 : type SharedSSTMeta struct {
43 : // Backing is the shared object underlying this SST. Can be attached to an
44 : // objstorage.Provider.
45 : Backing objstorage.RemoteObjectBackingHandle
46 :
47 : // Smallest and Largest internal keys for the overall bounds. The Kind and
48 : // SeqNum of these will reflect what is physically present in the source Pebble
49 : // instance's view of the sstable; it's up to the ingesting instance to set the
50 : // sequence number in the trailer to match the read-time sequence numbers
51 : // reserved for the level this SST is being ingested into. The Kind is expected
52 : // to remain unchanged by the ingesting instance.
53 : //
54 : // Note that these bounds could be narrower than the bounds of the underlying
55 : // sstable; ScanInternal is expected to truncate sstable bounds to the user key
56 : // bounds passed into that method.
57 : Smallest, Largest InternalKey
58 :
59 : // SmallestRangeKey and LargestRangeKey are internal keys that denote the
60 : // range key bounds of this sstable. Must lie within [Smallest, Largest].
61 : SmallestRangeKey, LargestRangeKey InternalKey
62 :
63 : // SmallestPointKey and LargestPointKey are internal keys that denote the
64 : // point key bounds of this sstable. Must lie within [Smallest, Largest].
65 : SmallestPointKey, LargestPointKey InternalKey
66 :
67 : // Level denotes the level at which this file was present at read time.
68 : // For files visited by ScanInternal, this value will only be 5 or 6.
69 : Level uint8
70 :
71 : // Size contains an estimate of the size of this sstable.
72 : Size uint64
73 :
74 : // fileNum at time of creation in the creator instance. Only used for
75 : // debugging/tests.
76 : fileNum base.FileNum
77 : }
78 :
79 1 : func (s *SharedSSTMeta) cloneFromFileMeta(f *fileMetadata) {
80 1 : *s = SharedSSTMeta{
81 1 : Smallest: f.Smallest.Clone(),
82 1 : Largest: f.Largest.Clone(),
83 1 : SmallestRangeKey: f.SmallestRangeKey.Clone(),
84 1 : LargestRangeKey: f.LargestRangeKey.Clone(),
85 1 : SmallestPointKey: f.SmallestPointKey.Clone(),
86 1 : LargestPointKey: f.LargestPointKey.Clone(),
87 1 : Size: f.Size,
88 1 : fileNum: f.FileNum,
89 1 : }
90 1 : }
91 :
92 : type sharedByLevel []SharedSSTMeta
93 :
94 1 : func (s sharedByLevel) Len() int { return len(s) }
95 0 : func (s sharedByLevel) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
96 1 : func (s sharedByLevel) Less(i, j int) bool { return s[i].Level < s[j].Level }
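A small usage sketch (editor's addition): because sharedByLevel implements sort.Interface, a batch of SharedSSTMeta values, such as those collected by a visitSharedFile callback, can be ordered by level with the standard library. The helper name is hypothetical.

import "sort"

// sortSharedMetasByLevel orders shared sstable metadata so that level-5 files
// precede level-6 files, matching Less above. Purely illustrative.
func sortSharedMetasByLevel(metas []SharedSSTMeta) {
	sort.Sort(sharedByLevel(metas))
}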
97 :
98 : type pcIterPos int
99 :
100 : const (
101 : pcIterPosCur pcIterPos = iota
102 : pcIterPosNext
103 : )
104 :
105 : // pointCollapsingIterator is an internalIterator that collapses point keys and
106 : // returns at most one point internal key for each user key. Merges and
107 : // SingleDels are not supported and result in a panic if encountered. Point keys
108 : // deleted by rangedels are considered shadowed and not exposed.
109 : //
110 : // Only used in ScanInternal to return at most one internal key per user key.
111 : type pointCollapsingIterator struct {
112 : iter keyspan.InterleavingIter
113 : pos pcIterPos
114 : comparer *base.Comparer
115 : merge base.Merge
116 : err error
117 : seqNum uint64
118 : // The current position of `iter`. Always owned by the underlying iter.
119 : iterKey *InternalKey
120 : // The last saved key. findNextEntry and similar methods are expected to save
121 : // the current value of iterKey to savedKey if they're iterating away from the
122 : // current key but still need to retain it. See comments in findNextEntry on
123 : // how this field is used.
124 : //
125 : // At the end of a positioning call:
126 : // - if pos == pcIterPosNext, iterKey is pointing to the next user key owned
127 : // by `iter` while savedKey holds a copy of our current key.
128 : // - If pos == pcIterPosCur, iterKey is pointing to an `iter`-owned current
129 : // key, and savedKey is either undefined or pointing to a version of the
130 : // current key owned by this iterator (i.e. backed by savedKeyBuf).
131 : savedKey InternalKey
132 : savedKeyBuf []byte
133 : // Value at the current iterator position, at iterKey.
134 : iterValue base.LazyValue
135 : // If fixedSeqNum is non-zero, all emitted points are verified to have this
136 : // fixed sequence number.
137 : fixedSeqNum uint64
138 : }
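To make the collapsing rule above concrete, here is an editor's simplified model that operates on plain data rather than the real iterator stack: keys arrive ordered by (user key ascending, sequence number descending), and only the first entry per user key survives. Range deletes, MERGEs and SINGLEDELs are outside the scope of this sketch; the types and names are hypothetical.

// modelKey is a toy stand-in for an InternalKey in this illustration.
type modelKey struct {
	UserKey string
	SeqNum  uint64
	Kind    string // e.g. "SET", "DEL"
}

// collapsePoints keeps the newest entry for each user key, mirroring how
// pointCollapsingIterator emits at most one point key per user key while
// still surfacing tombstones such as DELs.
func collapsePoints(in []modelKey) []modelKey {
	var out []modelKey
	for i, k := range in {
		if i == 0 || k.UserKey != in[i-1].UserKey {
			out = append(out, k)
		}
	}
	return out
}

For the input a#5,SET; a#3,SET; b#2,DEL the sketch returns a#5,SET and b#2,DEL: older versions of a are hidden, while b's tombstone is still exposed.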
139 :
140 1 : func (p *pointCollapsingIterator) Span() *keyspan.Span {
141 1 : return p.iter.Span()
142 1 : }
143 :
144 : // SeekPrefixGE implements the InternalIterator interface.
145 : func (p *pointCollapsingIterator) SeekPrefixGE(
146 : prefix, key []byte, flags base.SeekGEFlags,
147 0 : ) (*base.InternalKey, base.LazyValue) {
148 0 : p.resetKey()
149 0 : p.iterKey, p.iterValue = p.iter.SeekPrefixGE(prefix, key, flags)
150 0 : p.pos = pcIterPosCur
151 0 : if p.iterKey == nil {
152 0 : return nil, base.LazyValue{}
153 0 : }
154 0 : return p.findNextEntry()
155 : }
156 :
157 : // SeekGE implements the InternalIterator interface.
158 : func (p *pointCollapsingIterator) SeekGE(
159 : key []byte, flags base.SeekGEFlags,
160 1 : ) (*base.InternalKey, base.LazyValue) {
161 1 : p.resetKey()
162 1 : p.iterKey, p.iterValue = p.iter.SeekGE(key, flags)
163 1 : p.pos = pcIterPosCur
164 1 : if p.iterKey == nil {
165 1 : return nil, base.LazyValue{}
166 1 : }
167 1 : return p.findNextEntry()
168 : }
169 :
170 : // SeekLT implements the InternalIterator interface.
171 : func (p *pointCollapsingIterator) SeekLT(
172 : key []byte, flags base.SeekLTFlags,
173 0 : ) (*base.InternalKey, base.LazyValue) {
174 0 : panic("unimplemented")
175 : }
176 :
177 1 : func (p *pointCollapsingIterator) resetKey() {
178 1 : p.savedKey.UserKey = p.savedKeyBuf[:0]
179 1 : p.savedKey.Trailer = 0
180 1 : p.iterKey = nil
181 1 : p.pos = pcIterPosCur
182 1 : }
183 :
184 1 : func (p *pointCollapsingIterator) verifySeqNum(key *base.InternalKey) *base.InternalKey {
185 1 : if !invariants.Enabled {
186 0 : return key
187 0 : }
188 1 : if p.fixedSeqNum == 0 || key == nil || key.Kind() == InternalKeyKindRangeDelete {
189 1 : return key
190 1 : }
191 0 : if key.SeqNum() != p.fixedSeqNum {
192 0 : panic(fmt.Sprintf("expected foreign point key to have seqnum %d, got %d", p.fixedSeqNum, key.SeqNum()))
193 : }
194 0 : return key
195 : }
196 :
197 : // findNextEntry is called to return the next key. p.iter must be positioned at the
198 : // start of the first user key we are interested in.
199 1 : func (p *pointCollapsingIterator) findNextEntry() (*base.InternalKey, base.LazyValue) {
200 1 : p.saveKey()
201 1 : // Saves a comparison in the fast path
202 1 : firstIteration := true
203 1 : for p.iterKey != nil {
204 1 : // NB: p.savedKey is either the current key (on the first iteration, before
205 1 : // p.iterKey has advanced) or the previous key.
206 1 : if !firstIteration && !p.comparer.Equal(p.iterKey.UserKey, p.savedKey.UserKey) {
207 1 : p.saveKey()
208 1 : continue
209 : }
210 1 : firstIteration = false
211 1 : if s := p.iter.Span(); s != nil && s.CoversAt(p.seqNum, p.iterKey.SeqNum()) {
212 1 : // All future keys for this user key must be deleted.
213 1 : if p.savedKey.Kind() == InternalKeyKindSingleDelete {
214 0 : panic("cannot process singledel key in point collapsing iterator")
215 : }
216 : // Fast forward to the next user key.
217 1 : p.saveKey()
218 1 : p.iterKey, p.iterValue = p.iter.Next()
219 1 : for p.iterKey != nil && p.savedKey.SeqNum() >= p.iterKey.SeqNum() && p.comparer.Equal(p.iterKey.UserKey, p.savedKey.UserKey) {
220 1 : p.iterKey, p.iterValue = p.iter.Next()
221 1 : }
222 1 : continue
223 : }
224 1 : switch p.savedKey.Kind() {
225 1 : case InternalKeyKindSet, InternalKeyKindDelete, InternalKeyKindSetWithDelete, InternalKeyKindDeleteSized:
226 1 : // Note that we return SETs directly, even if they would otherwise get
227 1 : // compacted with a Del and turned into a SetWithDelete. This is a fast
228 1 : // path optimization that can break SINGLEDEL determinism. To lead to
229 1 : // consistent SINGLEDEL behaviour, this iterator should *not* be used for
230 1 : // a keyspace where SINGLEDELs could be in use. If this iterator observes
231 1 : // a SINGLEDEL as the first internal key for a user key, it will panic.
232 1 : //
233 1 : // As p.value is a lazy value owned by the child iterator, we can thread
234 1 : // it through without loading it into p.valueBuf.
235 1 : //
236 1 : // TODO(bilal): We can even avoid saving the key in this fast path if
237 1 : // we are in a block where setHasSamePrefix = false in a v3 sstable,
238 1 : // guaranteeing that there's only one internal key for each user key.
239 1 : // Thread this logic through the sstable iterators and/or consider
240 1 : // collapsing (ha) this logic into the sstable iterators that are aware
241 1 : // of blocks and can determine user key changes without doing key saves
242 1 : // or comparisons.
243 1 : p.pos = pcIterPosCur
244 1 : return p.verifySeqNum(p.iterKey), p.iterValue
245 0 : case InternalKeyKindSingleDelete:
246 0 : // Panic, as this iterator is not expected to observe single deletes.
247 0 : panic("cannot process singledel key in point collapsing iterator")
248 0 : case InternalKeyKindMerge:
249 0 : // Panic, as this iterator is not expected to observe merges.
250 0 : panic("cannot process merge key in point collapsing iterator")
251 1 : case InternalKeyKindRangeDelete:
252 1 : // These are interleaved by the interleaving iterator ahead of all points.
253 1 : // We should pass them as-is, but also account for any points ahead of
254 1 : // them.
255 1 : p.pos = pcIterPosCur
256 1 : return p.verifySeqNum(p.iterKey), p.iterValue
257 0 : default:
258 0 : panic(fmt.Sprintf("unexpected kind: %d", p.iterKey.Kind()))
259 : }
260 : }
261 0 : p.resetKey()
262 0 : return nil, base.LazyValue{}
263 : }
264 :
265 : // First implements the InternalIterator interface.
266 1 : func (p *pointCollapsingIterator) First() (*base.InternalKey, base.LazyValue) {
267 1 : p.resetKey()
268 1 : p.iterKey, p.iterValue = p.iter.First()
269 1 : p.pos = pcIterPosCur
270 1 : if p.iterKey == nil {
271 0 : return nil, base.LazyValue{}
272 0 : }
273 1 : return p.findNextEntry()
274 : }
275 :
276 : // Last implements the InternalIterator interface.
277 0 : func (p *pointCollapsingIterator) Last() (*base.InternalKey, base.LazyValue) {
278 0 : panic("unimplemented")
279 : }
280 :
281 1 : func (p *pointCollapsingIterator) saveKey() {
282 1 : if p.iterKey == nil {
283 1 : p.savedKey = InternalKey{UserKey: p.savedKeyBuf[:0]}
284 1 : return
285 1 : }
286 1 : p.savedKeyBuf = append(p.savedKeyBuf[:0], p.iterKey.UserKey...)
287 1 : p.savedKey = InternalKey{UserKey: p.savedKeyBuf, Trailer: p.iterKey.Trailer}
288 : }
289 :
290 : // Next implements the InternalIterator interface.
291 1 : func (p *pointCollapsingIterator) Next() (*base.InternalKey, base.LazyValue) {
292 1 : switch p.pos {
293 1 : case pcIterPosCur:
294 1 : p.saveKey()
295 1 : if p.iterKey != nil && p.iterKey.Kind() == InternalKeyKindRangeDelete {
296 1 : // Step over the interleaved range delete and process the very next
297 1 : // internal key, even if it's at the same user key. This is because a
298 1 : // point for that user key has not been returned yet.
299 1 : p.iterKey, p.iterValue = p.iter.Next()
300 1 : break
301 : }
302 : // Fast forward to the next user key.
303 1 : key, val := p.iter.Next()
304 1 : // p.savedKey.SeqNum() >= key.SeqNum() is an optimization that allows us to
305 1 : // use p.savedKey.SeqNum() < key.SeqNum() as a sign that the user key has
306 1 : // changed, without needing to do the full key comparison.
307 1 : for key != nil && p.savedKey.SeqNum() >= key.SeqNum() &&
308 1 : p.comparer.Equal(p.savedKey.UserKey, key.UserKey) {
309 1 : key, val = p.iter.Next()
310 1 : }
311 1 : if key == nil {
312 1 : // There are no keys to return.
313 1 : p.resetKey()
314 1 : return nil, base.LazyValue{}
315 1 : }
316 1 : p.iterKey, p.iterValue = key, val
317 0 : case pcIterPosNext:
318 0 : p.pos = pcIterPosCur
319 : }
320 1 : if p.iterKey == nil {
321 1 : p.resetKey()
322 1 : return nil, base.LazyValue{}
323 1 : }
324 1 : return p.findNextEntry()
325 : }
326 :
327 : // NextPrefix implements the InternalIterator interface.
328 0 : func (p *pointCollapsingIterator) NextPrefix(succKey []byte) (*base.InternalKey, base.LazyValue) {
329 0 : panic("unimplemented")
330 : }
331 :
332 : // Prev implements the InternalIterator interface.
333 0 : func (p *pointCollapsingIterator) Prev() (*base.InternalKey, base.LazyValue) {
334 0 : panic("unimplemented")
335 : }
336 :
337 : // Error implements the InternalIterator interface.
338 1 : func (p *pointCollapsingIterator) Error() error {
339 1 : if p.err != nil {
340 0 : return p.err
341 0 : }
342 1 : return p.iter.Error()
343 : }
344 :
345 : // Close implements the InternalIterator interface.
346 1 : func (p *pointCollapsingIterator) Close() error {
347 1 : return p.iter.Close()
348 1 : }
349 :
350 : // SetBounds implements the InternalIterator interface.
351 0 : func (p *pointCollapsingIterator) SetBounds(lower, upper []byte) {
352 0 : p.resetKey()
353 0 : p.iter.SetBounds(lower, upper)
354 0 : }
355 :
356 : // String implements the InternalIterator interface.
357 0 : func (p *pointCollapsingIterator) String() string {
358 0 : return p.iter.String()
359 0 : }
360 :
361 : var _ internalIterator = &pointCollapsingIterator{}
362 :
363 : // IteratorLevelKind is used to denote whether the current ScanInternal iterator
364 : // is unknown, belongs to a flushable, or belongs to an LSM level.
365 : type IteratorLevelKind int8
366 :
367 : const (
368 : // IteratorLevelUnknown indicates an unknown LSM level.
369 : IteratorLevelUnknown IteratorLevelKind = iota
370 : // IteratorLevelLSM indicates an LSM level.
371 : IteratorLevelLSM
372 : // IteratorLevelFlushable indicates a flushable (i.e. memtable).
373 : IteratorLevelFlushable
374 : )
375 :
376 : // IteratorLevel is used with scanInternalIterator to surface additional iterator-specific info where possible.
377 : // Note: this struct is only provided for point keys.
378 : type IteratorLevel struct {
379 : Kind IteratorLevelKind
380 : // FlushableIndex indicates the position of this level within the flushable queue.
381 : // Only valid if Kind == IteratorLevelFlushable.
382 : FlushableIndex int
383 : // The level within the LSM. Only valid if Kind == IteratorLevelLSM.
384 : Level int
385 : // Sublevel is only valid if Kind == IteratorLevelLSM and Level == 0.
386 : Sublevel int
387 : }
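scanInternalImpl (below) passes an IteratorLevel value to the visitPointKey callback. The following is an editor's illustration of how a callback might render that information; the helper is not part of Pebble's API.

// describeIteratorLevel labels where a point key was surfaced from, using the
// fields documented above.
func describeIteratorLevel(info IteratorLevel) string {
	switch info.Kind {
	case IteratorLevelFlushable:
		return fmt.Sprintf("flushable #%d", info.FlushableIndex)
	case IteratorLevelLSM:
		if info.Level == 0 {
			return fmt.Sprintf("L0 sublevel %d", info.Sublevel)
		}
		return fmt.Sprintf("L%d", info.Level)
	default:
		return "unknown level"
	}
}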
388 :
389 : // scanInternalIterator is an iterator that returns all internal keys, including
390 : // tombstones. For instance, an InternalKeyKindDelete would be returned as an
391 : // InternalKeyKindDelete instead of the iterator skipping over to the next key.
392 : // Internal keys within a user key are collapsed, eg. if there are two SETs, the
393 : // one with the higher sequence is returned. Useful if an external user of Pebble
394 : // needs to observe and rebuild Pebble's history of internal keys, such as in
395 : // node-to-node replication. For use with {db,snapshot}.ScanInternal().
396 : //
397 : // scanInternalIterator is expected to ignore point keys deleted by range
398 : // deletions, and range keys shadowed by a range key unset or delete; however, it
399 : // *must* return the range delete as well as the range key unset/delete that did
400 : // the shadowing.
401 : type scanInternalIterator struct {
402 : db *DB
403 : opts scanInternalOptions
404 : comparer *base.Comparer
405 : merge Merge
406 : iter internalIterator
407 : readState *readState
408 : version *version
409 : rangeKey *iteratorRangeKeyState
410 : pointKeyIter internalIterator
411 : iterKey *InternalKey
412 : iterValue LazyValue
413 : alloc *iterAlloc
414 : newIters tableNewIters
415 : newIterRangeKey keyspan.TableNewSpanIter
416 : seqNum uint64
417 : iterLevels []IteratorLevel
418 : mergingIter *mergingIter
419 :
420 : // boundsBuf holds two buffers used to store the lower and upper bounds.
421 : // Whenever the InternalIterator's bounds change, the new bounds are copied
422 : // into boundsBuf[boundsBufIdx]. The two bounds share a slice to reduce
423 : // allocations. opts.LowerBound and opts.UpperBound point into this slice.
424 : boundsBuf [2][]byte
425 : boundsBufIdx int
426 : }
427 :
428 : // truncateSharedFile truncates a shared file's [Smallest, Largest] fields to
429 : // [lower, upper), potentially opening iterators on the file to find keys within
430 : // the requested bounds. A SharedSSTMeta is produced that is suitable for
431 : // external consumption by other Pebble instances. If shouldSkip is true, this
432 : // file does not contain any keys in [lower, upper) and can be skipped.
433 : //
434 : // TODO(bilal): If opening iterators and doing reads in this method is too
435 : // inefficient, consider producing non-tight file bounds instead.
436 : func (d *DB) truncateSharedFile(
437 : ctx context.Context,
438 : lower, upper []byte,
439 : level int,
440 : file *fileMetadata,
441 : objMeta objstorage.ObjectMetadata,
442 1 : ) (sst *SharedSSTMeta, shouldSkip bool, err error) {
443 1 : cmp := d.cmp
444 1 : sst = &SharedSSTMeta{}
445 1 : sst.cloneFromFileMeta(file)
446 1 : sst.Level = uint8(level)
447 1 : sst.Backing, err = d.objProvider.RemoteObjectBacking(&objMeta)
448 1 : if err != nil {
449 0 : return nil, false, err
450 0 : }
451 1 : needsLowerTruncate := cmp(lower, file.Smallest.UserKey) > 0
452 1 : needsUpperTruncate := cmp(upper, file.Largest.UserKey) < 0 || (cmp(upper, file.Largest.UserKey) == 0 && !file.Largest.IsExclusiveSentinel())
453 1 : // Fast path: file is entirely within [lower, upper).
454 1 : if !needsLowerTruncate && !needsUpperTruncate {
455 1 : return sst, false, nil
456 1 : }
457 :
458 : // We will need to truncate file bounds in at least one direction. Open all
459 : // relevant iterators.
460 1 : iter, rangeDelIter, err := d.newIters(ctx, file, &IterOptions{
461 1 : LowerBound: lower,
462 1 : UpperBound: upper,
463 1 : level: manifest.Level(level),
464 1 : }, internalIterOpts{})
465 1 : if err != nil {
466 0 : return nil, false, err
467 0 : }
468 1 : defer iter.Close()
469 1 : if rangeDelIter != nil {
470 1 : rangeDelIter = keyspan.Truncate(
471 1 : cmp, rangeDelIter, lower, upper, nil, nil,
472 1 : false, /* panicOnUpperTruncate */
473 1 : )
474 1 : defer rangeDelIter.Close()
475 1 : }
476 1 : rangeKeyIter, err := d.tableNewRangeKeyIter(file, keyspan.SpanIterOptions{})
477 1 : if err != nil {
478 0 : return nil, false, err
479 0 : }
480 1 : if rangeKeyIter != nil {
481 1 : rangeKeyIter = keyspan.Truncate(
482 1 : cmp, rangeKeyIter, lower, upper, nil, nil,
483 1 : false, /* panicOnUpperTruncate */
484 1 : )
485 1 : defer rangeKeyIter.Close()
486 1 : }
487 : // Check if we need to truncate on the left side. This means finding a new
488 : // SmallestPointKey and SmallestRangeKey that are >= lower.
489 1 : if needsLowerTruncate {
490 1 : sst.SmallestPointKey.UserKey = sst.SmallestPointKey.UserKey[:0]
491 1 : sst.SmallestPointKey.Trailer = 0
492 1 : key, _ := iter.SeekGE(lower, base.SeekGEFlagsNone)
493 1 : foundPointKey := key != nil
494 1 : if key != nil {
495 1 : sst.SmallestPointKey.CopyFrom(*key)
496 1 : }
497 1 : if rangeDelIter != nil {
498 1 : span := rangeDelIter.SeekGE(lower)
499 1 : if span != nil && (len(sst.SmallestPointKey.UserKey) == 0 || base.InternalCompare(cmp, span.SmallestKey(), sst.SmallestPointKey) < 0) {
500 1 : sst.SmallestPointKey.CopyFrom(span.SmallestKey())
501 1 : foundPointKey = true
502 1 : }
503 : }
504 1 : if !foundPointKey {
505 1 : // There are no point keys in the span we're interested in.
506 1 : sst.SmallestPointKey = InternalKey{}
507 1 : sst.LargestPointKey = InternalKey{}
508 1 : }
509 1 : sst.SmallestRangeKey.UserKey = sst.SmallestRangeKey.UserKey[:0]
510 1 : sst.SmallestRangeKey.Trailer = 0
511 1 : if rangeKeyIter != nil {
512 1 : span := rangeKeyIter.SeekGE(lower)
513 1 : if span != nil {
514 1 : sst.SmallestRangeKey.CopyFrom(span.SmallestKey())
515 1 : } else {
516 1 : // There are no range keys in the span we're interested in.
517 1 : sst.SmallestRangeKey = InternalKey{}
518 1 : sst.LargestRangeKey = InternalKey{}
519 1 : }
520 : }
521 : }
522 : // Check if we need to truncate on the right side. This means finding a new
523 : // LargestPointKey and LargestRangeKey that are < upper.
524 1 : if needsUpperTruncate {
525 1 : sst.LargestPointKey.UserKey = sst.LargestPointKey.UserKey[:0]
526 1 : sst.LargestPointKey.Trailer = 0
527 1 : key, _ := iter.SeekLT(upper, base.SeekLTFlagsNone)
528 1 : foundPointKey := key != nil
529 1 : if key != nil {
530 1 : sst.LargestPointKey.CopyFrom(*key)
531 1 : }
532 1 : if rangeDelIter != nil {
533 1 : span := rangeDelIter.SeekLT(upper)
534 1 : if span != nil && (len(sst.LargestPointKey.UserKey) == 0 || base.InternalCompare(cmp, span.LargestKey(), sst.LargestPointKey) > 0) {
535 1 : sst.LargestPointKey.CopyFrom(span.LargestKey())
536 1 : foundPointKey = true
537 1 : }
538 : }
539 1 : if !foundPointKey {
540 1 : // There are no point keys in the span we're interested in.
541 1 : sst.SmallestPointKey = InternalKey{}
542 1 : sst.LargestPointKey = InternalKey{}
543 1 : }
544 1 : sst.LargestRangeKey.UserKey = sst.LargestRangeKey.UserKey[:0]
545 1 : sst.LargestRangeKey.Trailer = 0
546 1 : if rangeKeyIter != nil {
547 1 : span := rangeKeyIter.SeekLT(upper)
548 1 : if span != nil {
549 1 : sst.LargestRangeKey.CopyFrom(span.LargestKey())
550 1 : } else {
551 1 : // There are no range keys in the span we're interested in.
552 1 : sst.SmallestRangeKey = InternalKey{}
553 1 : sst.LargestRangeKey = InternalKey{}
554 1 : }
555 : }
556 : }
557 : // Set overall bounds based on {Smallest,Largest}{Point,Range}Key.
558 1 : switch {
559 1 : case len(sst.SmallestRangeKey.UserKey) == 0:
560 1 : sst.Smallest = sst.SmallestPointKey
561 1 : case len(sst.SmallestPointKey.UserKey) == 0:
562 1 : sst.Smallest = sst.SmallestRangeKey
563 1 : default:
564 1 : sst.Smallest = sst.SmallestPointKey
565 1 : if base.InternalCompare(cmp, sst.SmallestRangeKey, sst.SmallestPointKey) < 0 {
566 1 : sst.Smallest = sst.SmallestRangeKey
567 1 : }
568 : }
569 1 : switch {
570 1 : case len(sst.LargestRangeKey.UserKey) == 0:
571 1 : sst.Largest = sst.LargestPointKey
572 1 : case len(sst.LargestPointKey.UserKey) == 0:
573 1 : sst.Largest = sst.LargestRangeKey
574 1 : default:
575 1 : sst.Largest = sst.LargestPointKey
576 1 : if base.InternalCompare(cmp, sst.LargestRangeKey, sst.LargestPointKey) > 0 {
577 1 : sst.Largest = sst.LargestRangeKey
578 1 : }
579 : }
580 : // On rare occasion, a file might overlap with [lower, upper) but not actually
581 : // have any keys within those bounds. Skip such files.
582 1 : if len(sst.Smallest.UserKey) == 0 {
583 1 : return nil, true, nil
584 1 : }
585 1 : sst.Size, err = d.tableCache.estimateSize(file, sst.Smallest.UserKey, sst.Largest.UserKey)
586 1 : if err != nil {
587 0 : return nil, false, err
588 0 : }
589 : // On occasion, estimateSize gives us a low estimate, i.e. a 0 file size. This
590 : // can cause panics in places where we divide by file sizes. Correct for it
591 : // here.
592 1 : if sst.Size == 0 {
593 1 : sst.Size = 1
594 1 : }
595 1 : return sst, false, nil
596 : }
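The bound-combination switches near the end of truncateSharedFile reduce to a simple rule: an empty user key means no key of that type fell within [lower, upper); otherwise the smaller (or larger) of the point and range bounds wins under InternalCompare. An equivalent helper, included only as an editor's restatement of the smallest-key case:

// combineSmallest mirrors the first switch above. Purely illustrative; the
// real code inlines this logic.
func combineSmallest(cmp base.Compare, point, rangeKey InternalKey) InternalKey {
	switch {
	case len(rangeKey.UserKey) == 0:
		return point
	case len(point.UserKey) == 0:
		return rangeKey
	default:
		if base.InternalCompare(cmp, rangeKey, point) < 0 {
			return rangeKey
		}
		return point
	}
}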
597 :
598 : func scanInternalImpl(
599 : ctx context.Context, lower, upper []byte, iter *scanInternalIterator, opts *scanInternalOptions,
600 1 : ) error {
601 1 : if opts.visitSharedFile != nil && (lower == nil || upper == nil) {
602 0 : panic("lower and upper bounds must be specified in skip-shared iteration mode")
603 : }
604 : // Before starting iteration, check if any files in levels sharedLevelsStart
605 : // and below are *not* shared. Error out if that is the case, as skip-shared
606 : // iteration will not produce a consistent point-in-time view of this range
607 : // of keys. For files that are shared, call visitSharedFile with a truncated
608 : // version of that file.
609 1 : cmp := iter.comparer.Compare
610 1 : provider := iter.db.ObjProvider()
611 1 : seqNum := iter.seqNum
612 1 : current := iter.version
613 1 : if current == nil {
614 1 : current = iter.readState.current
615 1 : }
616 1 : if opts.visitSharedFile != nil {
617 1 : if provider == nil {
618 0 : panic("expected non-nil Provider in skip-shared iteration mode")
619 : }
620 1 : for level := sharedLevelsStart; level < numLevels; level++ {
621 1 : files := current.Levels[level].Iter()
622 1 : for f := files.SeekGE(cmp, lower); f != nil && cmp(f.Smallest.UserKey, upper) < 0; f = files.Next() {
623 1 : var objMeta objstorage.ObjectMetadata
624 1 : var err error
625 1 : objMeta, err = provider.Lookup(fileTypeTable, f.FileBacking.DiskFileNum)
626 1 : if err != nil {
627 0 : return err
628 0 : }
629 1 : if !objMeta.IsShared() {
630 0 : return errors.Wrapf(ErrInvalidSkipSharedIteration, "file %s is not shared", objMeta.DiskFileNum)
631 0 : }
632 1 : if !base.Visible(f.LargestSeqNum, seqNum, base.InternalKeySeqNumMax) {
633 1 : return errors.Wrapf(ErrInvalidSkipSharedIteration, "file %s contains keys newer than snapshot", objMeta.DiskFileNum)
634 1 : }
635 1 : var sst *SharedSSTMeta
636 1 : var skip bool
637 1 : sst, skip, err = iter.db.truncateSharedFile(ctx, lower, upper, level, f, objMeta)
638 1 : if err != nil {
639 0 : return err
640 0 : }
641 1 : if skip {
642 1 : continue
643 : }
644 1 : if err = opts.visitSharedFile(sst); err != nil {
645 0 : return err
646 0 : }
647 : }
648 : }
649 : }
650 :
651 1 : for valid := iter.seekGE(lower); valid && iter.error() == nil; valid = iter.next() {
652 1 : key := iter.unsafeKey()
653 1 :
654 1 : if opts.rateLimitFunc != nil {
655 0 : if err := opts.rateLimitFunc(key, iter.lazyValue()); err != nil {
656 0 : return err
657 0 : }
658 : }
659 :
660 1 : switch key.Kind() {
661 1 : case InternalKeyKindRangeKeyDelete, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeySet:
662 1 : if opts.visitRangeKey != nil {
663 1 : span := iter.unsafeSpan()
664 1 : if err := opts.visitRangeKey(span.Start, span.End, span.Keys); err != nil {
665 0 : return err
666 0 : }
667 : }
668 1 : case InternalKeyKindRangeDelete:
669 1 : if opts.visitRangeDel != nil {
670 1 : rangeDel := iter.unsafeRangeDel()
671 1 : if err := opts.visitRangeDel(rangeDel.Start, rangeDel.End, rangeDel.LargestSeqNum()); err != nil {
672 0 : return err
673 0 : }
674 : }
675 1 : default:
676 1 : if opts.visitPointKey != nil {
677 1 : var info IteratorLevel
678 1 : if len(iter.mergingIter.heap.items) > 0 {
679 1 : mergingIterIdx := iter.mergingIter.heap.items[0].index
680 1 : info = iter.iterLevels[mergingIterIdx]
681 1 : } else {
682 0 : info = IteratorLevel{Kind: IteratorLevelUnknown}
683 0 : }
684 1 : val := iter.lazyValue()
685 1 : if err := opts.visitPointKey(key, val, info); err != nil {
686 0 : return err
687 0 : }
688 : }
689 : }
690 : }
691 :
692 1 : return nil
693 : }
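On the consuming side, the visitSharedFile callback invoked above receives each truncated *SharedSSTMeta in level-then-key order. A minimal collecting visitor, sketched by the editor (the function name and destination slice are hypothetical):

// collectSharedFiles returns a callback that appends every shared sstable's
// metadata to dst for later use, e.g. ingestion by another Pebble instance.
func collectSharedFiles(dst *[]SharedSSTMeta) func(sst *SharedSSTMeta) error {
	return func(sst *SharedSSTMeta) error {
		*dst = append(*dst, *sst)
		return nil
	}
}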
694 :
695 : // constructPointIter constructs a merging iterator and sets i.iter to it.
696 1 : func (i *scanInternalIterator) constructPointIter(memtables flushableList, buf *iterAlloc) {
697 1 : // Merging levels and levels from iterAlloc.
698 1 : mlevels := buf.mlevels[:0]
699 1 : levels := buf.levels[:0]
700 1 :
701 1 : // We compute the number of levels needed ahead of time and reallocate a slice if
702 1 : // the array from the iterAlloc isn't large enough. Doing this allocation once
703 1 : // should improve the performance.
704 1 : numMergingLevels := len(memtables)
705 1 : numLevelIters := 0
706 1 :
707 1 : current := i.version
708 1 : if current == nil {
709 1 : current = i.readState.current
710 1 : }
711 1 : numMergingLevels += len(current.L0SublevelFiles)
712 1 : numLevelIters += len(current.L0SublevelFiles)
713 1 :
714 1 : for level := 1; level < len(current.Levels); level++ {
715 1 : if current.Levels[level].Empty() {
716 1 : continue
717 : }
718 1 : if i.opts.skipSharedLevels && level >= sharedLevelsStart {
719 1 : continue
720 : }
721 1 : numMergingLevels++
722 1 : numLevelIters++
723 : }
724 :
725 1 : if numMergingLevels > cap(mlevels) {
726 0 : mlevels = make([]mergingIterLevel, 0, numMergingLevels)
727 0 : }
728 1 : if numLevelIters > cap(levels) {
729 0 : levels = make([]levelIter, 0, numLevelIters)
730 0 : }
731 : // TODO(bilal): Push these into the iterAlloc buf.
732 1 : var rangeDelMiter keyspan.MergingIter
733 1 : rangeDelIters := make([]keyspan.FragmentIterator, 0, numMergingLevels)
734 1 : rangeDelLevels := make([]keyspan.LevelIter, 0, numLevelIters)
735 1 :
736 1 : i.iterLevels = make([]IteratorLevel, numMergingLevels)
737 1 : mlevelsIndex := 0
738 1 :
739 1 : // Next are the memtables.
740 1 : for j := len(memtables) - 1; j >= 0; j-- {
741 1 : mem := memtables[j]
742 1 : mlevels = append(mlevels, mergingIterLevel{
743 1 : iter: mem.newIter(&i.opts.IterOptions),
744 1 : })
745 1 : i.iterLevels[mlevelsIndex] = IteratorLevel{
746 1 : Kind: IteratorLevelFlushable,
747 1 : FlushableIndex: j,
748 1 : }
749 1 : mlevelsIndex++
750 1 : if rdi := mem.newRangeDelIter(&i.opts.IterOptions); rdi != nil {
751 1 : rangeDelIters = append(rangeDelIters, rdi)
752 1 : }
753 : }
754 :
755 : // Next are the file levels: L0 sub-levels followed by lower levels.
756 1 : levelsIndex := len(levels)
757 1 : mlevels = mlevels[:numMergingLevels]
758 1 : levels = levels[:numLevelIters]
759 1 : rangeDelLevels = rangeDelLevels[:numLevelIters]
760 1 : i.opts.IterOptions.snapshotForHideObsoletePoints = i.seqNum
761 1 : addLevelIterForFiles := func(files manifest.LevelIterator, level manifest.Level) {
762 1 : li := &levels[levelsIndex]
763 1 : rli := &rangeDelLevels[levelsIndex]
764 1 :
765 1 : li.init(
766 1 : context.Background(), i.opts.IterOptions, i.comparer.Compare, i.comparer.Split, i.newIters, files, level,
767 1 : internalIterOpts{})
768 1 : li.initBoundaryContext(&mlevels[mlevelsIndex].levelIterBoundaryContext)
769 1 : mlevels[mlevelsIndex].iter = li
770 1 : rli.Init(keyspan.SpanIterOptions{RangeKeyFilters: i.opts.RangeKeyFilters},
771 1 : i.comparer.Compare, tableNewRangeDelIter(context.Background(), i.newIters), files, level,
772 1 : manifest.KeyTypePoint)
773 1 : rangeDelIters = append(rangeDelIters, rli)
774 1 :
775 1 : levelsIndex++
776 1 : mlevelsIndex++
777 1 : }
778 :
779 1 : for j := len(current.L0SublevelFiles) - 1; j >= 0; j-- {
780 1 : i.iterLevels[mlevelsIndex] = IteratorLevel{
781 1 : Kind: IteratorLevelLSM,
782 1 : Level: 0,
783 1 : Sublevel: j,
784 1 : }
785 1 : addLevelIterForFiles(current.L0SublevelFiles[j].Iter(), manifest.L0Sublevel(j))
786 1 : }
787 : // Add level iterators for the non-empty non-L0 levels.
788 1 : for level := 1; level < numLevels; level++ {
789 1 : if current.Levels[level].Empty() {
790 1 : continue
791 : }
792 1 : if i.opts.skipSharedLevels && level >= sharedLevelsStart {
793 1 : continue
794 : }
795 1 : i.iterLevels[mlevelsIndex] = IteratorLevel{Kind: IteratorLevelLSM, Level: level}
796 1 : addLevelIterForFiles(current.Levels[level].Iter(), manifest.Level(level))
797 : }
798 :
799 1 : buf.merging.init(&i.opts.IterOptions, &InternalIteratorStats{}, i.comparer.Compare, i.comparer.Split, mlevels...)
800 1 : buf.merging.snapshot = i.seqNum
801 1 : rangeDelMiter.Init(i.comparer.Compare, keyspan.VisibleTransform(i.seqNum), new(keyspan.MergingBuffers), rangeDelIters...)
802 1 :
803 1 : if i.opts.includeObsoleteKeys {
804 1 : iiter := &keyspan.InterleavingIter{}
805 1 : iiter.Init(i.comparer, &buf.merging, &rangeDelMiter,
806 1 : keyspan.InterleavingIterOpts{
807 1 : LowerBound: i.opts.LowerBound,
808 1 : UpperBound: i.opts.UpperBound,
809 1 : })
810 1 : i.pointKeyIter = iiter
811 1 : } else {
812 1 : pcIter := &pointCollapsingIterator{
813 1 : comparer: i.comparer,
814 1 : merge: i.merge,
815 1 : seqNum: i.seqNum,
816 1 : }
817 1 : pcIter.iter.Init(i.comparer, &buf.merging, &rangeDelMiter, keyspan.InterleavingIterOpts{
818 1 : LowerBound: i.opts.LowerBound,
819 1 : UpperBound: i.opts.UpperBound,
820 1 : })
821 1 : i.pointKeyIter = pcIter
822 1 : }
823 1 : i.iter = i.pointKeyIter
824 : }
825 :
826 : // constructRangeKeyIter constructs the range-key iterator stack, populating
827 : // i.rangeKey.rangeKeyIter with the resulting iterator. This is similar to
828 : // Iterator.constructRangeKeyIter, except it doesn't handle batches and ensures
829 : // iterConfig does *not* elide unsets/deletes.
830 1 : func (i *scanInternalIterator) constructRangeKeyIter() {
831 1 : // We want the bounded iter from iterConfig, but not the collapsing of
832 1 : // RangeKeyUnsets and RangeKeyDels.
833 1 : i.rangeKey.rangeKeyIter = i.rangeKey.iterConfig.Init(
834 1 : i.comparer, i.seqNum, i.opts.LowerBound, i.opts.UpperBound,
835 1 : nil /* hasPrefix */, nil /* prefix */, false, /* onlySets */
836 1 : &i.rangeKey.rangeKeyBuffers.internal)
837 1 :
838 1 : // Next are the flushables: memtables and large batches.
839 1 : if i.readState != nil {
840 1 : for j := len(i.readState.memtables) - 1; j >= 0; j-- {
841 1 : mem := i.readState.memtables[j]
842 1 : // We only need to read from memtables which contain sequence numbers older
843 1 : // than seqNum.
844 1 : if logSeqNum := mem.logSeqNum; logSeqNum >= i.seqNum {
845 1 : continue
846 : }
847 1 : if rki := mem.newRangeKeyIter(&i.opts.IterOptions); rki != nil {
848 1 : i.rangeKey.iterConfig.AddLevel(rki)
849 1 : }
850 : }
851 : }
852 :
853 1 : current := i.version
854 1 : if current == nil {
855 1 : current = i.readState.current
856 1 : }
857 : // Next are the file levels: L0 sub-levels followed by lower levels.
858 : //
859 : // Add file-specific iterators for L0 files containing range keys. This is less
860 : // efficient than using levelIters for sublevels of L0 files containing
861 : // range keys, but range keys are expected to be sparse anyway, reducing the
862 : // cost benefit of maintaining a separate L0Sublevels instance for range key
863 : // files and then using it here.
864 : //
865 : // NB: We iterate L0's files in reverse order. They're sorted by
866 : // LargestSeqNum ascending, and we need to add them to the merging iterator
867 : // in LargestSeqNum descending to preserve the merging iterator's invariants
868 : // around Key Trailer order.
869 1 : iter := current.RangeKeyLevels[0].Iter()
870 1 : for f := iter.Last(); f != nil; f = iter.Prev() {
871 1 : spanIter, err := i.newIterRangeKey(f, i.opts.SpanIterOptions())
872 1 : if err != nil {
873 0 : i.rangeKey.iterConfig.AddLevel(&errorKeyspanIter{err: err})
874 0 : continue
875 : }
876 1 : i.rangeKey.iterConfig.AddLevel(spanIter)
877 : }
878 :
879 : // Add level iterators for the non-empty non-L0 levels.
880 1 : for level := 1; level < len(current.RangeKeyLevels); level++ {
881 1 : if current.RangeKeyLevels[level].Empty() {
882 1 : continue
883 : }
884 1 : if i.opts.skipSharedLevels && level >= sharedLevelsStart {
885 1 : continue
886 : }
887 1 : li := i.rangeKey.iterConfig.NewLevelIter()
888 1 : spanIterOpts := i.opts.SpanIterOptions()
889 1 : li.Init(spanIterOpts, i.comparer.Compare, i.newIterRangeKey, current.RangeKeyLevels[level].Iter(),
890 1 : manifest.Level(level), manifest.KeyTypeRange)
891 1 : i.rangeKey.iterConfig.AddLevel(li)
892 : }
893 : }
894 :
895 : // seekGE seeks this iterator to the first key that's greater than or equal
896 : // to the specified user key.
897 1 : func (i *scanInternalIterator) seekGE(key []byte) bool {
898 1 : i.iterKey, i.iterValue = i.iter.SeekGE(key, base.SeekGEFlagsNone)
899 1 : return i.iterKey != nil
900 1 : }
901 :
902 : // unsafeKey returns the unsafe InternalKey at the current position. The value
903 : // is nil if the iterator is invalid or exhausted.
904 1 : func (i *scanInternalIterator) unsafeKey() *InternalKey {
905 1 : return i.iterKey
906 1 : }
907 :
908 : // lazyValue returns a value pointer to the value at the current iterator
909 : // position. Behaviour undefined if unsafeKey() returns a Range key or Rangedel
910 : // kind key.
911 1 : func (i *scanInternalIterator) lazyValue() LazyValue {
912 1 : return i.iterValue
913 1 : }
914 :
915 : // unsafeRangeDel returns a range key span. Behaviour undefined if UnsafeKey returns
916 : // a non-rangedel kind.
917 1 : func (i *scanInternalIterator) unsafeRangeDel() *keyspan.Span {
918 1 : type spanInternalIterator interface {
919 1 : Span() *keyspan.Span
920 1 : }
921 1 : return i.pointKeyIter.(spanInternalIterator).Span()
922 1 : }
923 :
924 : // unsafeSpan returns a range key span. Behaviour undefined if UnsafeKey returns
925 : // a non-rangekey type.
926 1 : func (i *scanInternalIterator) unsafeSpan() *keyspan.Span {
927 1 : return i.rangeKey.iiter.Span()
928 1 : }
929 :
930 : // next advances the iterator in the forward direction, and returns the
931 : // iterator's new validity state.
932 1 : func (i *scanInternalIterator) next() bool {
933 1 : i.iterKey, i.iterValue = i.iter.Next()
934 1 : return i.iterKey != nil
935 1 : }
936 :
937 : // error returns an error from the internal iterator, if there's any.
938 1 : func (i *scanInternalIterator) error() error {
939 1 : return i.iter.Error()
940 1 : }
941 :
942 : // close closes this iterator, and releases any pooled objects.
943 1 : func (i *scanInternalIterator) close() error {
944 1 : if err := i.iter.Close(); err != nil {
945 0 : return err
946 0 : }
947 1 : if i.readState != nil {
948 1 : i.readState.unref()
949 1 : }
950 1 : if i.version != nil {
951 1 : i.version.Unref()
952 1 : }
953 1 : if i.rangeKey != nil {
954 1 : i.rangeKey.PrepareForReuse()
955 1 : *i.rangeKey = iteratorRangeKeyState{
956 1 : rangeKeyBuffers: i.rangeKey.rangeKeyBuffers,
957 1 : }
958 1 : iterRangeKeyStateAllocPool.Put(i.rangeKey)
959 1 : i.rangeKey = nil
960 1 : }
961 1 : if alloc := i.alloc; alloc != nil {
962 1 : for j := range i.boundsBuf {
963 1 : if cap(i.boundsBuf[j]) >= maxKeyBufCacheSize {
964 0 : alloc.boundsBuf[j] = nil
965 1 : } else {
966 1 : alloc.boundsBuf[j] = i.boundsBuf[j]
967 1 : }
968 : }
969 1 : *alloc = iterAlloc{
970 1 : keyBuf: alloc.keyBuf[:0],
971 1 : boundsBuf: alloc.boundsBuf,
972 1 : prefixOrFullSeekKey: alloc.prefixOrFullSeekKey[:0],
973 1 : }
974 1 : iterAllocPool.Put(alloc)
975 1 : i.alloc = nil
976 : }
977 1 : return nil
978 : }
979 :
980 1 : func (i *scanInternalIterator) initializeBoundBufs(lower, upper []byte) {
981 1 : buf := i.boundsBuf[i.boundsBufIdx][:0]
982 1 : if lower != nil {
983 1 : buf = append(buf, lower...)
984 1 : i.opts.LowerBound = buf
985 1 : } else {
986 1 : i.opts.LowerBound = nil
987 1 : }
988 1 : if upper != nil {
989 1 : buf = append(buf, upper...)
990 1 : i.opts.UpperBound = buf[len(buf)-len(upper):]
991 1 : } else {
992 1 : i.opts.UpperBound = nil
993 1 : }
994 1 : i.boundsBuf[i.boundsBufIdx] = buf
995 1 : i.boundsBufIdx = 1 - i.boundsBufIdx
996 : }