Line data Source code
1 : // Copyright 2018 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package sstable
6 :
7 : import (
8 : "context"
9 : "encoding/binary"
10 : "unsafe"
11 :
12 : "github.com/cockroachdb/errors"
13 : "github.com/cockroachdb/pebble/internal/base"
14 : "github.com/cockroachdb/pebble/internal/invariants"
15 : "github.com/cockroachdb/pebble/internal/keyspan"
16 : "github.com/cockroachdb/pebble/internal/manual"
17 : "github.com/cockroachdb/pebble/internal/rangedel"
18 : "github.com/cockroachdb/pebble/internal/rangekey"
19 : )
20 :
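// uvarintLen returns the number of bytes binary.PutUvarint would use to
// encode v. For example, uvarintLen(0x7f) == 1 and uvarintLen(0x80) == 2.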
21 1 : func uvarintLen(v uint32) int {
22 1 : i := 0
23 1 : for v >= 0x80 {
24 0 : v >>= 7
25 0 : i++
26 0 : }
27 1 : return i + 1
28 : }
29 :
30 : type blockWriter struct {
31 : restartInterval int
32 : nEntries int
33 : nextRestart int
34 : buf []byte
35 : // For datablocks in TableFormatPebblev3, we steal the most significant bit
36 : // in restarts for encoding setHasSameKeyPrefixSinceLastRestart. This leaves
37 : // us with 31 bits, which is more than enough (no one needs > 2GB blocks).
38 : // Typically, restarts occur every 16 keys, and by storing this bit with the
39 : // restart, we can optimize for the case where a user wants to skip to the
40 : // next prefix which happens to be in the same data block, but is > 16 keys
41 : // away. We have seen production situations with 100+ versions per MVCC key
42 : // (which share the same prefix). Additionally, for such writers, the prefix
43 : // compression of a key (the bytes it shares with the preceding key) is
44 : // limited to the prefix part of the preceding key -- this ensures that when
45 : // doing NextPrefix (see blockIter) we don't need to assemble the full key
46 : // for each step since by limiting the length of the shared key we are
47 : // ensuring that any of the keys with the same prefix can be used to
48 : // assemble the full key when the prefix does change.
49 : restarts []uint32
50 : // Do not read curKey directly from outside blockWriter since it can have
51 : // the InternalKeyKindSSTableInternalObsoleteBit set. Use getCurKey() or
52 : // getCurUserKey() instead.
53 : curKey []byte
54 : // curValue excludes the optional prefix provided to
55 : // storeWithOptionalValuePrefix.
56 : curValue []byte
57 : prevKey []byte
58 : tmp [4]byte
59 : // We don't know the state of the sets that were at the end of the previous
60 : // block, so this is initially 0. It may be true for the second and later
61 : // restarts in a block. Not having inter-block information is fine since we
62 : // will optimize by stepping through restarts only within the same block.
63 : // Note that the first restart is the first key in the block.
64 : setHasSameKeyPrefixSinceLastRestart bool
65 : }
66 :
67 1 : func (w *blockWriter) clear() {
68 1 : *w = blockWriter{
69 1 : buf: w.buf[:0],
70 1 : restarts: w.restarts[:0],
71 1 : curKey: w.curKey[:0],
72 1 : curValue: w.curValue[:0],
73 1 : prevKey: w.prevKey[:0],
74 1 : }
75 1 : }
76 :
77 : // MaximumBlockSize is an extremely generous maximum block size of 256MiB. We
78 : // explicitly place this limit to reserve a few bits in the restart for
79 : // internal use.
80 : const MaximumBlockSize = 1 << 28
81 : const setHasSameKeyPrefixRestartMask uint32 = 1 << 31
82 : const restartMaskLittleEndianHighByteWithoutSetHasSamePrefix byte = 0b0111_1111
83 : const restartMaskLittleEndianHighByteOnlySetHasSamePrefix byte = 0b1000_0000
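// packRestart and unpackRestart are a minimal sketch (illustrative helpers,
// not used elsewhere in this file) of how a restart offset and the
// setHasSameKeyPrefixSinceLastRestart bit share a single uint32. The write
// side does this inline in storeWithOptionalValuePrefix; the read side is
// decodeRestart below.
func packRestart(offset uint32, setHasSameKeyPrefix bool) uint32 {
	// offset is always < MaximumBlockSize (1<<28), so the top bit is free.
	if setHasSameKeyPrefix {
		return offset | setHasSameKeyPrefixRestartMask
	}
	return offset
}

func unpackRestart(r uint32) (offset uint32, setHasSameKeyPrefix bool) {
	return r &^ setHasSameKeyPrefixRestartMask, r&setHasSameKeyPrefixRestartMask != 0
}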
84 :
85 1 : func (w *blockWriter) getCurKey() InternalKey {
86 1 : k := base.DecodeInternalKey(w.curKey)
87 1 : k.Trailer = k.Trailer & trailerObsoleteMask
88 1 : return k
89 1 : }
90 :
91 1 : func (w *blockWriter) getCurUserKey() []byte {
92 1 : n := len(w.curKey) - base.InternalTrailerLen
93 1 : if n < 0 {
94 0 : panic(errors.AssertionFailedf("corrupt key in blockWriter buffer"))
95 : }
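	// Note: the full slice expression caps capacity at n so that an append by
	// the caller cannot overwrite the trailer bytes that follow the user key.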
96 1 : return w.curKey[:n:n]
97 : }
98 :
99 : // If !addValuePrefix, the valuePrefix is ignored.
100 : func (w *blockWriter) storeWithOptionalValuePrefix(
101 : keySize int,
102 : value []byte,
103 : maxSharedKeyLen int,
104 : addValuePrefix bool,
105 : valuePrefix valuePrefix,
106 : setHasSameKeyPrefix bool,
107 1 : ) {
108 1 : shared := 0
109 1 : if !setHasSameKeyPrefix {
110 1 : w.setHasSameKeyPrefixSinceLastRestart = false
111 1 : }
112 1 : if w.nEntries == w.nextRestart {
113 1 : w.nextRestart = w.nEntries + w.restartInterval
114 1 : restart := uint32(len(w.buf))
115 1 : if w.setHasSameKeyPrefixSinceLastRestart {
116 1 : restart = restart | setHasSameKeyPrefixRestartMask
117 1 : }
118 1 : w.setHasSameKeyPrefixSinceLastRestart = true
119 1 : w.restarts = append(w.restarts, restart)
120 1 : } else {
121 1 : // TODO(peter): Manually inlined version of base.SharedPrefixLen(). This
122 1 : // is 3% faster on BenchmarkWriter on go1.16. Remove if future versions
123 1 : // show this to not be a performance win. For now, functions that use
124 1 : // unsafe cannot be inlined.
125 1 : n := maxSharedKeyLen
126 1 : if n > len(w.prevKey) {
127 1 : n = len(w.prevKey)
128 1 : }
129 1 : asUint64 := func(b []byte, i int) uint64 {
130 1 : return binary.LittleEndian.Uint64(b[i:])
131 1 : }
132 1 : for shared < n-7 && asUint64(w.curKey, shared) == asUint64(w.prevKey, shared) {
133 1 : shared += 8
134 1 : }
135 1 : for shared < n && w.curKey[shared] == w.prevKey[shared] {
136 1 : shared++
137 1 : }
138 : }
139 :
140 1 : lenValuePlusOptionalPrefix := len(value)
141 1 : if addValuePrefix {
142 1 : lenValuePlusOptionalPrefix++
143 1 : }
144 1 : needed := 3*binary.MaxVarintLen32 + len(w.curKey[shared:]) + lenValuePlusOptionalPrefix
145 1 : n := len(w.buf)
146 1 : if cap(w.buf) < n+needed {
147 1 : newCap := 2 * cap(w.buf)
148 1 : if newCap == 0 {
149 1 : newCap = 1024
150 1 : }
151 1 : for newCap < n+needed {
152 1 : newCap *= 2
153 1 : }
154 1 : newBuf := make([]byte, n, newCap)
155 1 : copy(newBuf, w.buf)
156 1 : w.buf = newBuf
157 : }
158 1 : w.buf = w.buf[:n+needed]
159 1 :
160 1 : // TODO(peter): Manually inlined versions of binary.PutUvarint(). This is 15%
161 1 : // faster on BenchmarkWriter on go1.13. Remove if go1.14 or future versions
162 1 : // show this to not be a performance win.
163 1 : {
164 1 : x := uint32(shared)
165 1 : for x >= 0x80 {
166 0 : w.buf[n] = byte(x) | 0x80
167 0 : x >>= 7
168 0 : n++
169 0 : }
170 1 : w.buf[n] = byte(x)
171 1 : n++
172 : }
173 :
174 1 : {
175 1 : x := uint32(keySize - shared)
176 1 : for x >= 0x80 {
177 0 : w.buf[n] = byte(x) | 0x80
178 0 : x >>= 7
179 0 : n++
180 0 : }
181 1 : w.buf[n] = byte(x)
182 1 : n++
183 : }
184 :
185 1 : {
186 1 : x := uint32(lenValuePlusOptionalPrefix)
187 1 : for x >= 0x80 {
188 1 : w.buf[n] = byte(x) | 0x80
189 1 : x >>= 7
190 1 : n++
191 1 : }
192 1 : w.buf[n] = byte(x)
193 1 : n++
194 : }
195 :
196 1 : n += copy(w.buf[n:], w.curKey[shared:])
197 1 : if addValuePrefix {
198 1 : w.buf[n : n+1][0] = byte(valuePrefix)
199 1 : n++
200 1 : }
201 1 : n += copy(w.buf[n:], value)
202 1 : w.buf = w.buf[:n]
203 1 :
204 1 : w.curValue = w.buf[n-len(value):]
205 1 :
206 1 : w.nEntries++
207 : }
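// Each entry written by storeWithOptionalValuePrefix has the layout:
//
//	varint(shared) varint(keySize-shared) varint(valueLen) keySuffix [valuePrefix] value
//
// where keySuffix is curKey[shared:], and valueLen includes the optional
// 1-byte valuePrefix when addValuePrefix is true.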
208 :
209 1 : func (w *blockWriter) add(key InternalKey, value []byte) {
210 1 : w.addWithOptionalValuePrefix(
211 1 : key, false, value, len(key.UserKey), false, 0, false)
212 1 : }
213 :
214 : // Callers that always set addValuePrefix to false should use add() instead.
215 : //
216 : // isObsolete indicates whether this key-value pair is obsolete in this
217 : // sstable (only applicable when writing data blocks) -- see the comment in
218 : // table.go and the longer one in format.go. addValuePrefix adds a 1 byte
219 : // prefix to the value, specified in valuePrefix -- this is used for data
220 : // blocks in TableFormatPebblev3 onwards for SETs (see the comment in
221 : // format.go, with more details in value_block.go). setHasSameKeyPrefix is
222 : // also used in TableFormatPebblev3 onwards for SETs.
223 : func (w *blockWriter) addWithOptionalValuePrefix(
224 : key InternalKey,
225 : isObsolete bool,
226 : value []byte,
227 : maxSharedKeyLen int,
228 : addValuePrefix bool,
229 : valuePrefix valuePrefix,
230 : setHasSameKeyPrefix bool,
231 1 : ) {
232 1 : w.curKey, w.prevKey = w.prevKey, w.curKey
233 1 :
234 1 : size := key.Size()
235 1 : if cap(w.curKey) < size {
236 1 : w.curKey = make([]byte, 0, size*2)
237 1 : }
238 1 : w.curKey = w.curKey[:size]
239 1 : if isObsolete {
240 1 : key.Trailer = key.Trailer | trailerObsoleteBit
241 1 : }
242 1 : key.Encode(w.curKey)
243 1 :
244 1 : w.storeWithOptionalValuePrefix(
245 1 : size, value, maxSharedKeyLen, addValuePrefix, valuePrefix, setHasSameKeyPrefix)
246 : }
247 :
248 1 : func (w *blockWriter) finish() []byte {
249 1 : // Write the restart points to the buffer.
250 1 : if w.nEntries == 0 {
251 1 : // Every block must have at least one restart point.
252 1 : if cap(w.restarts) > 0 {
253 1 : w.restarts = w.restarts[:1]
254 1 : w.restarts[0] = 0
255 1 : } else {
256 1 : w.restarts = append(w.restarts, 0)
257 1 : }
258 : }
259 1 : tmp4 := w.tmp[:4]
260 1 : for _, x := range w.restarts {
261 1 : binary.LittleEndian.PutUint32(tmp4, x)
262 1 : w.buf = append(w.buf, tmp4...)
263 1 : }
264 1 : binary.LittleEndian.PutUint32(tmp4, uint32(len(w.restarts)))
265 1 : w.buf = append(w.buf, tmp4...)
266 1 : result := w.buf
267 1 :
268 1 : // Reset the block state.
269 1 : w.nEntries = 0
270 1 : w.nextRestart = 0
271 1 : w.buf = w.buf[:0]
272 1 : w.restarts = w.restarts[:0]
273 1 : return result
274 : }
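// The serialized block returned by finish is laid out as:
//
//	entries | restart[0] ... restart[numRestarts-1] | numRestarts
//
// with each restart offset and the trailing count encoded as a little-endian
// uint32.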
275 :
276 : // emptyBlockSize holds the size of an empty block. Every block ends
277 : // in a uint32 trailer encoding the number of restart points within the
278 : // block.
279 : const emptyBlockSize = 4
280 :
281 1 : func (w *blockWriter) estimatedSize() int {
282 1 : return len(w.buf) + 4*len(w.restarts) + emptyBlockSize
283 1 : }
284 :
285 : type blockEntry struct {
286 : offset int32
287 : keyStart int32
288 : keyEnd int32
289 : valStart int32
290 : valSize int32
291 : }
292 :
293 : // blockIter is an iterator over a single block of data.
294 : //
295 : // A blockIter provides an additional guarantee around key stability when a
296 : // block has a restart interval of 1 (i.e. when there is no prefix
297 : // compression). Key stability refers to whether the InternalKey.UserKey bytes
298 : // returned by a positioning call will remain stable after a subsequent
299 : // positioning call. The normal case is that a positioning call will invalidate
300 : // any previously returned InternalKey.UserKey. If a block has a restart
301 : // interval of 1 (no prefix compression), blockIter guarantees that
302 : // InternalKey.UserKey will point to the key as stored in the block itself
303 : // which will remain valid until the blockIter is closed. The key stability
304 : // guarantee is used by the range tombstone and range key code, which knows that
305 : // the respective blocks are always encoded with a restart interval of 1. This
306 : // per-block key stability guarantee is sufficient for range tombstones and
307 : // range keys as they are always encoded in a single block. Note: this
308 : // stability guarantee no longer holds for a block iter with synthetic suffix
309 : // replacement, but this doesn't matter, as the user will not open
310 : // an iterator with a synthetic suffix on a block with rangekeys (for now).
311 : //
312 : // A blockIter also provides a value stability guarantee for range deletions and
313 : // range keys since there is only a single range deletion and range key block
314 : // per sstable and the blockIter will not release the bytes for the block until
315 : // it is closed.
316 : //
317 : // Note on why blockIter knows about lazyValueHandling:
318 : //
319 : // blockIter's positioning functions (that return a LazyValue), are too
320 : // complex to inline even prior to lazyValueHandling. blockIter.Next and
321 : // blockIter.First were by far the cheapest and had costs 195 and 180
322 : // respectively, which exceeds the budget of 80. We initially tried to keep
323 : // the lazyValueHandling logic out of blockIter by wrapping it with a
324 : // lazyValueDataBlockIter. singleLevelIter and twoLevelIter would use this
325 : // wrapped iter. The functions in lazyValueDataBlockIter were simple, in that
326 : // they called the corresponding blockIter func and then decided whether the
327 : // value was in fact in-place (so return immediately) or needed further
328 : // handling. But these also turned out too costly for mid-stack inlining since
329 : // simple calls like the following have a high cost that is barely under the
330 : // budget of 80
331 : //
332 : // k, v := i.data.SeekGE(key, flags) // cost 74
333 : // k, v := i.data.Next() // cost 72
334 : //
335 : // We have 2 options for minimizing performance regressions:
336 : // - Include the lazyValueHandling logic in the already non-inlineable
337 : // blockIter functions: Since most of the time is spent in data block iters,
338 : // it is acceptable to take the small hit of unnecessary branching (which
339 : // hopefully branch prediction will predict correctly) for other kinds of
340 : // blocks.
341 : // - Duplicate the logic of singleLevelIterator and twoLevelIterator for the
342 : // v3 sstable and only use the aforementioned lazyValueDataBlockIter for a
343 : // v3 sstable. We would want to manage these copies via code generation.
344 : //
345 : // We have picked the first option here.
346 : type blockIter struct {
347 : cmp Compare
348 : split Split
349 :
350 : // Iterator transforms.
351 : //
352 : // SyntheticSuffix, if not nil, will replace the decoded ikey.UserKey suffix
353 : // before the key is returned to the user. A sequence of iter operations on a
354 : // block with a syntheticSuffix rule should return keys as if those operations
355 : // ran on a block with keys that all had the syntheticSuffix. As an example:
356 : // any sequence of block iter cmds should return the same keys for the
357 : // following two blocks:
358 : //
359 : // blockA: a@3,b@3,c@3
360 : // blockB: a@1,b@2,c@1 with syntheticSuffix=3
361 : //
362 : // To ensure this, suffix replacement will not change the ordering of keys in
363 : // the block because the iter assumes that no two keys in the block share the
364 : // same prefix. Furthermore, during SeekGE and SeekLT operations, the block
365 : // iterator handles "off by one" errors (explained in more detail in those
366 : // functions) when, for a given key, originalSuffix < searchSuffix <
367 : // replacementSuffix, with integer comparison. To handle these cases, the
368 : // iterator assumes:
369 : //
370 : // pebble.Compare(keyPrefix{replacementSuffix},keyPrefix{originalSuffix}) < 0
371 : // for keys with a suffix.
372 : //
373 : // NB: it is possible for a block iter to add a synthetic suffix on a key
374 : // without a suffix, which implies
375 : // pebble.Compare(keyPrefix{replacementSuffix},keyPrefix{noSuffix}) > 0,
376 : // however, the iterator would never need to handle an off by one error in
377 : // this case since originalSuffix (empty) > searchSuffix (non empty), with
378 : // integer comparison.
379 : //
380 : //
381 : // In addition, we also assume that any block with rangekeys will not contain
382 : // a synthetic suffix.
383 : transforms IterTransforms
384 :
385 : // offset is the byte index that marks where the current key/value is
386 : // encoded in the block.
387 : offset int32
388 : // nextOffset is the byte index where the next key/value is encoded in the
389 : // block.
390 : nextOffset int32
391 : // A "restart point" in a block is a point where the full key is encoded,
392 : // instead of just having a suffix of the key encoded. See readEntry() for
393 : // how prefix compression of keys works. Keys in between two restart points
394 : // only have a suffix encoded in the block. When restart interval is 1, no
395 : // prefix compression of keys happens. This is the case with range tombstone
396 : // blocks.
397 : //
398 : // All restart offsets are listed in increasing order in
399 : // i.ptr[i.restarts:len(block)-4], while numRestarts is encoded in the last
400 : // 4 bytes of the block as a uint32 (i.ptr[len(block)-4:]). i.restarts can
401 : // therefore be seen as the point where data in the block ends, and a list
402 : // of offsets of all restart points begins.
403 : restarts int32
404 : // Number of restart points in this block. Encoded at the end of the block
405 : // as a uint32.
406 : numRestarts int32
407 : ptr unsafe.Pointer
408 : data []byte
409 : // key contains the raw key the iterator is currently pointed at. This may
410 : // point directly to data stored in the block (for a key which has no prefix
411 : // compression), to fullKey (for a prefix compressed key), or to a slice of
412 : // data stored in cachedBuf (during reverse iteration).
413 : //
414 : // NB: In general, key contains the same logical content as ikey
415 : // (i.e. ikey = decode(key)), but if the iterator contains a synthetic suffix
416 : // replacement rule, this will not be the case. Therefore, key should never
417 : // be used after ikey is set.
418 : key []byte
419 : // fullKey is a buffer used for key prefix decompression.
420 : fullKey []byte
421 : // val contains the value the iterator is currently pointed at. If non-nil,
422 : // this points to a slice of the block data.
423 : val []byte
424 : // lazyValue is val turned into a LazyValue, whenever a positioning method
425 : // returns a non-nil key-value pair.
426 : lazyValue base.LazyValue
427 : // ikey contains the decoded InternalKey the iterator is currently pointed
428 : // at. Note that the memory backing ikey.UserKey is either data stored
429 : // directly in the block, fullKey, cachedBuf, or synthSuffixBuf. The key
430 : // stability guarantee for blocks built with a restart interval of 1 is
431 : // achieved by having ikey.UserKey always point to data stored directly in the
432 : // block.
433 : ikey InternalKey
434 : // cached and cachedBuf are used during reverse iteration. They are needed
435 : // because we can't perform prefix decoding in reverse, only in the forward
436 : // direction. In order to iterate in reverse, we decode and cache the entries
437 : // between two restart points.
438 : //
439 : // Note that cached[len(cached)-1] contains the previous entry to the one the
440 : // blockIter is currently pointed at. As usual, nextOffset will contain the
441 : // offset of the next entry. During reverse iteration, nextOffset will be
442 : // updated to point to offset, and we'll set the blockIter to point at the
443 : // entry cached[len(cached)-1]. See Prev() for more details.
444 : //
445 : // For a block encoded with a restart interval of 1, cached and cachedBuf
446 : // will not be used as there are no prefix compressed entries between the
447 : // restart points.
448 : cached []blockEntry
449 : cachedBuf []byte
450 : handle bufferHandle
451 : // firstUserKey is used for block iteration over already-loaded blocks.
452 : firstUserKey []byte
453 : lazyValueHandling struct {
454 : vbr *valueBlockReader
455 : hasValuePrefix bool
456 : }
457 : synthSuffixBuf []byte
458 : }
459 :
460 : // blockIter implements the base.InternalIterator interface.
461 : var _ base.InternalIterator = (*blockIter)(nil)
462 :
463 : func newBlockIter(
464 : cmp Compare, split Split, block block, transforms IterTransforms,
465 1 : ) (*blockIter, error) {
466 1 : i := &blockIter{}
467 1 : return i, i.init(cmp, split, block, transforms)
468 1 : }
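// exampleBlockRoundTrip is a minimal usage sketch of the writer/iterator pair
// in this file, assuming default byte-wise key comparison, no iterator
// transforms, and a nil Split (which is only consulted when a synthetic
// suffix is set). Illustrative only, not used elsewhere in this file.
func exampleBlockRoundTrip() {
	var w blockWriter
	w.restartInterval = 16
	w.add(base.MakeInternalKey([]byte("apple"), 1, InternalKeyKindSet), []byte("v1"))
	w.add(base.MakeInternalKey([]byte("banana"), 2, InternalKeyKindSet), []byte("v2"))
	it, err := newBlockIter(base.DefaultComparer.Compare, nil, w.finish(), IterTransforms{})
	if err != nil {
		panic(err)
	}
	var got []string
	for k, v := it.First(); k != nil; k, v = it.Next() {
		got = append(got, string(k.UserKey)+"="+string(v.InPlaceValue()))
	}
	_ = got // got is now []string{"apple=v1", "banana=v2"}
}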
469 :
470 0 : func (i *blockIter) String() string {
471 0 : return "block"
472 0 : }
473 :
474 1 : func (i *blockIter) init(cmp Compare, split Split, block block, transforms IterTransforms) error {
475 1 : numRestarts := int32(binary.LittleEndian.Uint32(block[len(block)-4:]))
476 1 : if numRestarts == 0 {
477 0 : return base.CorruptionErrorf("pebble/table: invalid table (block has no restart points)")
478 0 : }
479 1 : i.transforms = transforms
480 1 : i.synthSuffixBuf = i.synthSuffixBuf[:0]
481 1 : i.split = split
482 1 : i.cmp = cmp
483 1 : i.restarts = int32(len(block)) - 4*(1+numRestarts)
484 1 : i.numRestarts = numRestarts
485 1 : i.ptr = unsafe.Pointer(&block[0])
486 1 : i.data = block
487 1 : i.fullKey = i.fullKey[:0]
488 1 : i.val = nil
489 1 : i.clearCache()
490 1 : if i.restarts > 0 {
491 1 : if err := i.readFirstKey(); err != nil {
492 0 : return err
493 0 : }
494 1 : } else {
495 1 : // Block is empty.
496 1 : i.firstUserKey = nil
497 1 : }
498 1 : return nil
499 : }
500 :
501 : // NB: two cases of hideObsoletePoints:
502 : // - Local sstable iteration: syntheticSeqNum will be set iff the sstable was
503 : // ingested.
504 : // - Foreign sstable iteration: syntheticSeqNum is always set.
505 : func (i *blockIter) initHandle(
506 : cmp Compare, split Split, block bufferHandle, transforms IterTransforms,
507 1 : ) error {
508 1 : i.handle.Release()
509 1 : i.handle = block
510 1 : return i.init(cmp, split, block.Get(), transforms)
511 1 : }
512 :
513 1 : func (i *blockIter) invalidate() {
514 1 : i.clearCache()
515 1 : i.offset = 0
516 1 : i.nextOffset = 0
517 1 : i.restarts = 0
518 1 : i.numRestarts = 0
519 1 : i.data = nil
520 1 : }
521 :
522 : // isDataInvalidated returns true when the blockIter has been invalidated
523 : // using an invalidate call. NB: this is different from blockIter.Valid
524 : // which is part of the InternalIterator implementation.
525 1 : func (i *blockIter) isDataInvalidated() bool {
526 1 : return i.data == nil
527 1 : }
528 :
529 1 : func (i *blockIter) resetForReuse() blockIter {
530 1 : return blockIter{
531 1 : fullKey: i.fullKey[:0],
532 1 : cached: i.cached[:0],
533 1 : cachedBuf: i.cachedBuf[:0],
534 1 : data: nil,
535 1 : }
536 1 : }
537 :
538 1 : func (i *blockIter) readEntry() {
539 1 : ptr := unsafe.Pointer(uintptr(i.ptr) + uintptr(i.offset))
540 1 :
541 1 : // This is an ugly performance hack. Reading entries from blocks is one of
542 1 : // the inner-most routines and decoding the 3 varints per-entry takes
543 1 : // significant time. Neither go1.11 nor go1.12 will inline decodeVarint for
544 1 : // us, so we do it manually. This provides a 10-15% performance improvement
545 1 : // on blockIter benchmarks on both go1.11 and go1.12.
546 1 : //
547 1 : // TODO(peter): remove this hack if go:inline is ever supported.
548 1 :
549 1 : var shared uint32
550 1 : if a := *((*uint8)(ptr)); a < 128 {
551 1 : shared = uint32(a)
552 1 : ptr = unsafe.Pointer(uintptr(ptr) + 1)
553 1 : } else if a, b := a&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 1))); b < 128 {
554 0 : shared = uint32(b)<<7 | uint32(a)
555 0 : ptr = unsafe.Pointer(uintptr(ptr) + 2)
556 0 : } else if b, c := b&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 2))); c < 128 {
557 0 : shared = uint32(c)<<14 | uint32(b)<<7 | uint32(a)
558 0 : ptr = unsafe.Pointer(uintptr(ptr) + 3)
559 0 : } else if c, d := c&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 3))); d < 128 {
560 0 : shared = uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
561 0 : ptr = unsafe.Pointer(uintptr(ptr) + 4)
562 0 : } else {
563 0 : d, e := d&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 4)))
564 0 : shared = uint32(e)<<28 | uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
565 0 : ptr = unsafe.Pointer(uintptr(ptr) + 5)
566 0 : }
567 :
568 1 : var unshared uint32
569 1 : if a := *((*uint8)(ptr)); a < 128 {
570 1 : unshared = uint32(a)
571 1 : ptr = unsafe.Pointer(uintptr(ptr) + 1)
572 1 : } else if a, b := a&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 1))); b < 128 {
573 0 : unshared = uint32(b)<<7 | uint32(a)
574 0 : ptr = unsafe.Pointer(uintptr(ptr) + 2)
575 0 : } else if b, c := b&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 2))); c < 128 {
576 0 : unshared = uint32(c)<<14 | uint32(b)<<7 | uint32(a)
577 0 : ptr = unsafe.Pointer(uintptr(ptr) + 3)
578 0 : } else if c, d := c&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 3))); d < 128 {
579 0 : unshared = uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
580 0 : ptr = unsafe.Pointer(uintptr(ptr) + 4)
581 0 : } else {
582 0 : d, e := d&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 4)))
583 0 : unshared = uint32(e)<<28 | uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
584 0 : ptr = unsafe.Pointer(uintptr(ptr) + 5)
585 0 : }
586 :
587 1 : var value uint32
588 1 : if a := *((*uint8)(ptr)); a < 128 {
589 1 : value = uint32(a)
590 1 : ptr = unsafe.Pointer(uintptr(ptr) + 1)
591 1 : } else if a, b := a&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 1))); b < 128 {
592 1 : value = uint32(b)<<7 | uint32(a)
593 1 : ptr = unsafe.Pointer(uintptr(ptr) + 2)
594 1 : } else if b, c := b&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 2))); c < 128 {
595 0 : value = uint32(c)<<14 | uint32(b)<<7 | uint32(a)
596 0 : ptr = unsafe.Pointer(uintptr(ptr) + 3)
597 0 : } else if c, d := c&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 3))); d < 128 {
598 0 : value = uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
599 0 : ptr = unsafe.Pointer(uintptr(ptr) + 4)
600 0 : } else {
601 0 : d, e := d&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 4)))
602 0 : value = uint32(e)<<28 | uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
603 0 : ptr = unsafe.Pointer(uintptr(ptr) + 5)
604 0 : }
605 :
606 1 : unsharedKey := getBytes(ptr, int(unshared))
607 1 : // TODO(sumeer): move this into the else block below.
608 1 : i.fullKey = append(i.fullKey[:shared], unsharedKey...)
609 1 : if shared == 0 {
610 1 : // Provide stability for the key across positioning calls if the key
611 1 : // doesn't share a prefix with the previous key. This avoids requiring the
612 1 : // key to be copied if the caller knows the block has a restart interval of
613 1 : // 1. An important example of this is range-del blocks.
614 1 : i.key = unsharedKey
615 1 : } else {
616 1 : i.key = i.fullKey
617 1 : }
618 1 : ptr = unsafe.Pointer(uintptr(ptr) + uintptr(unshared))
619 1 : i.val = getBytes(ptr, int(value))
620 1 : i.nextOffset = int32(uintptr(ptr)-uintptr(i.ptr)) + int32(value)
621 : }
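// readEntrySketch is an illustrative (and slower) equivalent of the manual
// varint decoding above, expressed with the standard library; it returns the
// three length fields of the entry at offset and the number of header bytes.
// Not used elsewhere in this file.
func readEntrySketch(data []byte, offset int32) (shared, unshared, valueLen uint64, n int) {
	s, n1 := binary.Uvarint(data[offset:])
	u, n2 := binary.Uvarint(data[int(offset)+n1:])
	v, n3 := binary.Uvarint(data[int(offset)+n1+n2:])
	return s, u, v, n1 + n2 + n3
}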
622 :
623 1 : func (i *blockIter) readFirstKey() error {
624 1 : ptr := i.ptr
625 1 :
626 1 : // This is an ugly performance hack. Reading entries from blocks is one of
627 1 : // the inner-most routines and decoding the 3 varints per-entry takes
628 1 : // significant time. Neither go1.11 nor go1.12 will inline decodeVarint for
629 1 : // us, so we do it manually. This provides a 10-15% performance improvement
630 1 : // on blockIter benchmarks on both go1.11 and go1.12.
631 1 : //
632 1 : // TODO(peter): remove this hack if go:inline is ever supported.
633 1 :
634 1 : if shared := *((*uint8)(ptr)); shared == 0 {
635 1 : ptr = unsafe.Pointer(uintptr(ptr) + 1)
636 1 : } else {
637 0 : // The shared length is != 0, which is invalid.
638 0 : panic("first key in block must have zero shared length")
639 : }
640 :
641 1 : var unshared uint32
642 1 : if a := *((*uint8)(ptr)); a < 128 {
643 1 : unshared = uint32(a)
644 1 : ptr = unsafe.Pointer(uintptr(ptr) + 1)
645 1 : } else if a, b := a&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 1))); b < 128 {
646 0 : unshared = uint32(b)<<7 | uint32(a)
647 0 : ptr = unsafe.Pointer(uintptr(ptr) + 2)
648 0 : } else if b, c := b&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 2))); c < 128 {
649 0 : unshared = uint32(c)<<14 | uint32(b)<<7 | uint32(a)
650 0 : ptr = unsafe.Pointer(uintptr(ptr) + 3)
651 0 : } else if c, d := c&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 3))); d < 128 {
652 0 : unshared = uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
653 0 : ptr = unsafe.Pointer(uintptr(ptr) + 4)
654 0 : } else {
655 0 : d, e := d&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 4)))
656 0 : unshared = uint32(e)<<28 | uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
657 0 : ptr = unsafe.Pointer(uintptr(ptr) + 5)
658 0 : }
659 :
660 : // Skip the value length.
661 1 : if a := *((*uint8)(ptr)); a < 128 {
662 1 : ptr = unsafe.Pointer(uintptr(ptr) + 1)
663 1 : } else if a := *((*uint8)(unsafe.Pointer(uintptr(ptr) + 1))); a < 128 {
664 1 : ptr = unsafe.Pointer(uintptr(ptr) + 2)
665 1 : } else if a := *((*uint8)(unsafe.Pointer(uintptr(ptr) + 2))); a < 128 {
666 0 : ptr = unsafe.Pointer(uintptr(ptr) + 3)
667 0 : } else if a := *((*uint8)(unsafe.Pointer(uintptr(ptr) + 3))); a < 128 {
668 0 : ptr = unsafe.Pointer(uintptr(ptr) + 4)
669 0 : } else {
670 0 : ptr = unsafe.Pointer(uintptr(ptr) + 5)
671 0 : }
672 :
673 1 : firstKey := getBytes(ptr, int(unshared))
674 1 : // Manually inlining base.DecodeInternalKey provides a 5-10% speedup on
675 1 : // BlockIter benchmarks.
676 1 : if n := len(firstKey) - 8; n >= 0 {
677 1 : i.firstUserKey = firstKey[:n:n]
678 1 : } else {
679 0 : i.firstUserKey = nil
680 0 : return base.CorruptionErrorf("pebble/table: invalid firstKey in block")
681 0 : }
682 1 : return nil
683 : }
684 :
685 : // The sstable internal obsolete bit is set when writing a block and unset by
686 : // blockIter, so no code outside block writing/reading code ever sees it.
687 : const trailerObsoleteBit = uint64(base.InternalKeyKindSSTableInternalObsoleteBit)
688 : const trailerObsoleteMask = (InternalKeySeqNumMax << 8) | uint64(base.InternalKeyKindSSTableInternalObsoleteMask)
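// markObsolete and visibleTrailer sketch how the bit and mask interact: the
// writer ORs trailerObsoleteBit into the trailer of an obsolete key (see
// addWithOptionalValuePrefix), and readers AND with trailerObsoleteMask to
// strip the bit before a key escapes this package (see getCurKey and
// decodeInternalKey). Illustrative only, not used elsewhere in this file.
func markObsolete(trailer uint64) uint64   { return trailer | trailerObsoleteBit }
func visibleTrailer(trailer uint64) uint64 { return trailer & trailerObsoleteMask }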
689 :
690 1 : func (i *blockIter) decodeInternalKey(key []byte) (hiddenPoint bool) {
691 1 : // Manually inlining base.DecodeInternalKey provides a 5-10% speedup on
692 1 : // BlockIter benchmarks.
693 1 : if n := len(key) - 8; n >= 0 {
694 1 : trailer := binary.LittleEndian.Uint64(key[n:])
695 1 : hiddenPoint = i.transforms.HideObsoletePoints &&
696 1 : (trailer&trailerObsoleteBit != 0)
697 1 : i.ikey.Trailer = trailer & trailerObsoleteMask
698 1 : i.ikey.UserKey = key[:n:n]
699 1 : if n := i.transforms.SyntheticSeqNum; n != 0 {
700 1 : i.ikey.SetSeqNum(uint64(n))
701 1 : }
702 1 : } else {
703 1 : i.ikey.Trailer = uint64(InternalKeyKindInvalid)
704 1 : i.ikey.UserKey = nil
705 1 : }
706 1 : return hiddenPoint
707 : }
708 :
709 : // maybeReplaceSuffix replaces the suffix in i.ikey.UserKey with
710 : // i.transforms.syntheticSuffix. allowInPlace is set to false if there's a chance
711 : // that i.ikey.UserKey points to the same buffer as i.cachedBuf (i.e. during
712 : // reverse iteration).
713 1 : func (i *blockIter) maybeReplaceSuffix(allowInPlace bool) {
714 1 : if i.transforms.SyntheticSuffix != nil && i.ikey.UserKey != nil {
715 1 : prefixLen := i.split(i.ikey.UserKey)
716 1 : if allowInPlace && cap(i.ikey.UserKey) >= prefixLen+len(i.transforms.SyntheticSuffix) {
717 1 : i.ikey.UserKey = append(i.ikey.UserKey[:prefixLen], i.transforms.SyntheticSuffix...)
718 1 : return
719 1 : }
720 : // If ikey is cached or may get cached, we must copy
721 : // UserKey to a new buffer before prefix replacement.
722 1 : i.synthSuffixBuf = append(i.synthSuffixBuf[:0], i.ikey.UserKey[:prefixLen]...)
723 1 : i.synthSuffixBuf = append(i.synthSuffixBuf, i.transforms.SyntheticSuffix...)
724 1 : i.ikey.UserKey = i.synthSuffixBuf
725 : }
726 : }
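// For example, under a (hypothetical) Split for which the prefix of
// "apple@2" is "apple@", a SyntheticSuffix of "4" rewrites the returned key
// to "apple@4".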
727 :
728 1 : func (i *blockIter) clearCache() {
729 1 : i.cached = i.cached[:0]
730 1 : i.cachedBuf = i.cachedBuf[:0]
731 1 : }
732 :
733 1 : func (i *blockIter) cacheEntry() {
734 1 : var valStart int32
735 1 : valSize := int32(len(i.val))
736 1 : if valSize > 0 {
737 1 : valStart = int32(uintptr(unsafe.Pointer(&i.val[0])) - uintptr(i.ptr))
738 1 : }
739 :
740 1 : i.cached = append(i.cached, blockEntry{
741 1 : offset: i.offset,
742 1 : keyStart: int32(len(i.cachedBuf)),
743 1 : keyEnd: int32(len(i.cachedBuf) + len(i.key)),
744 1 : valStart: valStart,
745 1 : valSize: valSize,
746 1 : })
747 1 : i.cachedBuf = append(i.cachedBuf, i.key...)
748 : }
749 :
750 1 : func (i *blockIter) getFirstUserKey() []byte {
751 1 : return i.firstUserKey
752 1 : }
753 :
754 : // SeekGE implements internalIterator.SeekGE, as documented in the pebble
755 : // package.
756 1 : func (i *blockIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, base.LazyValue) {
757 1 : if invariants.Enabled && i.isDataInvalidated() {
758 0 : panic(errors.AssertionFailedf("invalidated blockIter used"))
759 : }
760 :
761 1 : i.clearCache()
762 1 : // Find the index of the smallest restart point whose key is >= the key
763 1 : // sought; index will be numRestarts if there is no such restart point.
764 1 : i.offset = 0
765 1 : var index int32
766 1 :
767 1 : {
768 1 : // NB: manually inlined sort.Search is ~5% faster.
769 1 : //
770 1 : // Define f(-1) == false and f(n) == true.
771 1 : // Invariant: f(index-1) == false, f(upper) == true.
772 1 : upper := i.numRestarts
773 1 : for index < upper {
774 1 : h := int32(uint(index+upper) >> 1) // avoid overflow when computing h
775 1 : // index ≤ h < upper
776 1 : offset := decodeRestart(i.data[i.restarts+4*h:])
777 1 : // For a restart point, there are 0 bytes shared with the previous key.
778 1 : // The varint encoding of 0 occupies 1 byte.
779 1 : ptr := unsafe.Pointer(uintptr(i.ptr) + uintptr(offset+1))
780 1 :
781 1 : // Decode the key at that restart point, and compare it to the key
782 1 : // sought. See the comment in readEntry for why we manually inline the
783 1 : // varint decoding.
784 1 : var v1 uint32
785 1 : if a := *((*uint8)(ptr)); a < 128 {
786 1 : v1 = uint32(a)
787 1 : ptr = unsafe.Pointer(uintptr(ptr) + 1)
788 1 : } else if a, b := a&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 1))); b < 128 {
789 0 : v1 = uint32(b)<<7 | uint32(a)
790 0 : ptr = unsafe.Pointer(uintptr(ptr) + 2)
791 0 : } else if b, c := b&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 2))); c < 128 {
792 0 : v1 = uint32(c)<<14 | uint32(b)<<7 | uint32(a)
793 0 : ptr = unsafe.Pointer(uintptr(ptr) + 3)
794 0 : } else if c, d := c&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 3))); d < 128 {
795 0 : v1 = uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
796 0 : ptr = unsafe.Pointer(uintptr(ptr) + 4)
797 0 : } else {
798 0 : d, e := d&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 4)))
799 0 : v1 = uint32(e)<<28 | uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
800 0 : ptr = unsafe.Pointer(uintptr(ptr) + 5)
801 0 : }
802 :
803 1 : if *((*uint8)(ptr)) < 128 {
804 1 : ptr = unsafe.Pointer(uintptr(ptr) + 1)
805 1 : } else if *((*uint8)(unsafe.Pointer(uintptr(ptr) + 1))) < 128 {
806 0 : ptr = unsafe.Pointer(uintptr(ptr) + 2)
807 0 : } else if *((*uint8)(unsafe.Pointer(uintptr(ptr) + 2))) < 128 {
808 0 : ptr = unsafe.Pointer(uintptr(ptr) + 3)
809 0 : } else if *((*uint8)(unsafe.Pointer(uintptr(ptr) + 3))) < 128 {
810 0 : ptr = unsafe.Pointer(uintptr(ptr) + 4)
811 0 : } else {
812 0 : ptr = unsafe.Pointer(uintptr(ptr) + 5)
813 0 : }
814 :
815 : // Manually inlining part of base.DecodeInternalKey provides a 5-10%
816 : // speedup on BlockIter benchmarks.
817 1 : s := getBytes(ptr, int(v1))
818 1 : var k []byte
819 1 : if n := len(s) - 8; n >= 0 {
820 1 : k = s[:n:n]
821 1 : }
822 : // Else k is invalid, and left as nil
823 :
824 1 : if i.cmp(key, k) > 0 {
825 1 : // The search key is greater than the user key at this restart point.
826 1 : // Search beyond this restart point, since we are trying to find the
827 1 : // first restart point with a user key >= the search key.
828 1 : index = h + 1 // preserves f(i-1) == false
829 1 : } else {
830 1 : // k >= search key, so prune everything after index (since index
831 1 : // satisfies the property we are looking for).
832 1 : upper = h // preserves f(j) == true
833 1 : }
834 : }
835 : // index == upper, f(index-1) == false, and f(upper) (= f(index)) == true
836 : // => answer is index.
837 : }
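	// The loop above is a manually inlined equivalent of the following sketch,
	// where restartUserKey is a hypothetical helper that decodes the user key
	// stored at restart point j:
	//
	//	index = int32(sort.Search(int(i.numRestarts), func(j int) bool {
	//		return i.cmp(key, restartUserKey(j)) <= 0
	//	}))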
838 :
839 : // index is the first restart point with key >= search key. Define the keys
840 : // between a restart point and the next restart point as belonging to that
841 : // restart point.
842 : //
843 : // Since keys are strictly increasing, if index > 0 then the restart point
844 : // at index-1 will be the first one that has some keys belonging to it that
845 : // could be equal to the search key. If index == 0, then all keys in this
846 : // block are larger than the key sought, and offset remains at zero.
847 1 : if index > 0 {
848 1 : i.offset = decodeRestart(i.data[i.restarts+4*(index-1):])
849 1 : }
850 1 : i.readEntry()
851 1 : hiddenPoint := i.decodeInternalKey(i.key)
852 1 :
853 1 : // Iterate from that restart point to somewhere >= the key sought.
854 1 : if !i.valid() {
855 0 : return nil, base.LazyValue{}
856 0 : }
857 :
858 : // A note on seeking in a block with a suffix replacement rule: even though
859 : // the binary search above was conducted on keys without suffix replacement,
860 : // Seek will still return the correct suffix replaced key. A binary
861 : // search without suffix replacement will land on a key that is _less_ than
862 : // the key the search would have landed on if all keys were already suffix
863 : // replaced. Since Seek then conducts forward iteration to the first suffix
864 : // replaced user key that is greater than or equal to the search key, the
865 : // correct key is still returned.
866 : //
867 : // As an example, consider the following block with a restart interval of 1,
868 : // with a replacement suffix of "4":
869 : // - Pre-suffix replacement: apple@1, banana@3
870 : // - Post-suffix replacement: apple@4, banana@4
871 : //
872 : // Suppose the client seeks with apple@3. Assuming suffixes sort in reverse
873 : // chronological order (i.e. apple@1>apple@3), the binary search without
874 : // suffix replacement would return apple@1. A binary search with suffix
875 : // replacement would return banana@4. After beginning forward iteration from
876 : // either returned restart point, forward iteration would
877 : // always return the correct key, banana@4.
878 : //
879 : // Further, if the user searched with apple@0 (i.e. a suffix less than the
880 : // pre replacement suffix) or with apple@5 (a suffix larger than the post
881 : // replacement suffix), the binary search with or without suffix replacement
882 : // would land on the same key, as we assume the following:
883 : // (1) no two keys in the sst share the same prefix.
884 : // (2) pebble.Compare(replacementSuffix,originalSuffix) > 0
885 :
886 1 : i.maybeReplaceSuffix(true /*allowInPlace*/)
887 1 :
888 1 : if !hiddenPoint && i.cmp(i.ikey.UserKey, key) >= 0 {
889 1 : // Initialize i.lazyValue
890 1 : if !i.lazyValueHandling.hasValuePrefix ||
891 1 : base.TrailerKind(i.ikey.Trailer) != InternalKeyKindSet {
892 1 : i.lazyValue = base.MakeInPlaceValue(i.val)
893 1 : } else if i.lazyValueHandling.vbr == nil || !isValueHandle(valuePrefix(i.val[0])) {
894 1 : i.lazyValue = base.MakeInPlaceValue(i.val[1:])
895 1 : } else {
896 1 : i.lazyValue = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val)
897 1 : }
898 1 : return &i.ikey, i.lazyValue
899 : }
900 1 : for i.Next(); i.valid(); i.Next() {
901 1 : if i.cmp(i.ikey.UserKey, key) >= 0 {
902 1 : // i.Next() has already initialized i.lazyValue.
903 1 : return &i.ikey, i.lazyValue
904 1 : }
905 : }
906 1 : return nil, base.LazyValue{}
907 : }
908 :
909 : // SeekPrefixGE implements internalIterator.SeekPrefixGE, as documented in the
910 : // pebble package.
911 : func (i *blockIter) SeekPrefixGE(
912 : prefix, key []byte, flags base.SeekGEFlags,
913 0 : ) (*base.InternalKey, base.LazyValue) {
914 0 : // This should never be called as prefix iteration is handled by sstable.Iterator.
915 0 : panic("pebble: SeekPrefixGE unimplemented")
916 : }
917 :
918 : // SeekLT implements internalIterator.SeekLT, as documented in the pebble
919 : // package.
920 1 : func (i *blockIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, base.LazyValue) {
921 1 : if invariants.Enabled && i.isDataInvalidated() {
922 0 : panic(errors.AssertionFailedf("invalidated blockIter used"))
923 : }
924 :
925 1 : i.clearCache()
926 1 : // Find the index of the smallest restart point whose key is >= the key
927 1 : // sought; index will be numRestarts if there is no such restart point.
928 1 : i.offset = 0
929 1 : var index int32
930 1 :
931 1 : {
932 1 : // NB: manually inlined sort.Search is ~5% faster.
933 1 : //
934 1 : // Define f(-1) == false and f(n) == true.
935 1 : // Invariant: f(index-1) == false, f(upper) == true.
936 1 : upper := i.numRestarts
937 1 : for index < upper {
938 1 : h := int32(uint(index+upper) >> 1) // avoid overflow when computing h
939 1 : // index ≤ h < upper
940 1 : offset := decodeRestart(i.data[i.restarts+4*h:])
941 1 : // For a restart point, there are 0 bytes shared with the previous key.
942 1 : // The varint encoding of 0 occupies 1 byte.
943 1 : ptr := unsafe.Pointer(uintptr(i.ptr) + uintptr(offset+1))
944 1 :
945 1 : // Decode the key at that restart point, and compare it to the key
946 1 : // sought. See the comment in readEntry for why we manually inline the
947 1 : // varint decoding.
948 1 : var v1 uint32
949 1 : if a := *((*uint8)(ptr)); a < 128 {
950 1 : v1 = uint32(a)
951 1 : ptr = unsafe.Pointer(uintptr(ptr) + 1)
952 1 : } else if a, b := a&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 1))); b < 128 {
953 0 : v1 = uint32(b)<<7 | uint32(a)
954 0 : ptr = unsafe.Pointer(uintptr(ptr) + 2)
955 0 : } else if b, c := b&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 2))); c < 128 {
956 0 : v1 = uint32(c)<<14 | uint32(b)<<7 | uint32(a)
957 0 : ptr = unsafe.Pointer(uintptr(ptr) + 3)
958 0 : } else if c, d := c&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 3))); d < 128 {
959 0 : v1 = uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
960 0 : ptr = unsafe.Pointer(uintptr(ptr) + 4)
961 0 : } else {
962 0 : d, e := d&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 4)))
963 0 : v1 = uint32(e)<<28 | uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
964 0 : ptr = unsafe.Pointer(uintptr(ptr) + 5)
965 0 : }
966 :
967 1 : if *((*uint8)(ptr)) < 128 {
968 1 : ptr = unsafe.Pointer(uintptr(ptr) + 1)
969 1 : } else if *((*uint8)(unsafe.Pointer(uintptr(ptr) + 1))) < 128 {
970 1 : ptr = unsafe.Pointer(uintptr(ptr) + 2)
971 1 : } else if *((*uint8)(unsafe.Pointer(uintptr(ptr) + 2))) < 128 {
972 0 : ptr = unsafe.Pointer(uintptr(ptr) + 3)
973 0 : } else if *((*uint8)(unsafe.Pointer(uintptr(ptr) + 3))) < 128 {
974 0 : ptr = unsafe.Pointer(uintptr(ptr) + 4)
975 0 : } else {
976 0 : ptr = unsafe.Pointer(uintptr(ptr) + 5)
977 0 : }
978 :
979 : // Manually inlining part of base.DecodeInternalKey provides a 5-10%
980 : // speedup on BlockIter benchmarks.
981 1 : s := getBytes(ptr, int(v1))
982 1 : var k []byte
983 1 : if n := len(s) - 8; n >= 0 {
984 1 : k = s[:n:n]
985 1 : }
986 : // Else k is invalid, and left as nil
987 :
988 1 : if i.cmp(key, k) > 0 {
989 1 : // The search key is greater than the user key at this restart point.
990 1 : // Search beyond this restart point, since we are trying to find the
991 1 : // first restart point with a user key >= the search key.
992 1 : index = h + 1 // preserves f(i-1) == false
993 1 : } else {
994 1 : // k >= search key, so prune everything after index (since index
995 1 : // satisfies the property we are looking for).
996 1 : upper = h // preserves f(j) == true
997 1 : }
998 : }
999 : // index == upper, f(index-1) == false, and f(upper) (= f(index)) == true
1000 : // => answer is index.
1001 : }
1002 :
1003 1 : if index == 0 {
1004 1 : if i.transforms.SyntheticSuffix != nil {
1005 1 : // The binary search was conducted on keys without suffix replacement,
1006 1 : // implying the first key in the block may be less than the search key. To
1007 1 : // double check, get the first key in the block with suffix replacement
1008 1 : // and compare to the search key. Consider the following example: suppose
1009 1 : // the user searches with a@3, the first key in the block is a@2 and the
1010 1 : // block contains a suffix replacement rule of 4. Since a@3 sorts before
1011 1 : // a@2, the binary search would return index==0. Without conducting the
1012 1 : // suffix replacement, the SeekLT would incorrectly return nil. With
1013 1 : // suffix replacement though, a@4 should be returned as a@4 sorts before
1014 1 : // a@3.
1015 1 : ikey, lazyVal := i.First()
1016 1 : if i.cmp(ikey.UserKey, key) < 0 {
1017 0 : return ikey, lazyVal
1018 0 : }
1019 : }
1020 : // If index == 0 then all keys in this block are larger than the key
1021 : // sought, so there is no match.
1022 1 : i.offset = -1
1023 1 : i.nextOffset = 0
1024 1 : return nil, base.LazyValue{}
1025 : }
1026 :
1027 : // INVARIANT: index > 0
1028 :
1029 : // Ignoring suffix replacement, index is the first restart point with key >=
1030 : // search key. Define the keys between a restart point and the next restart
1031 : // point as belonging to that restart point. Note that index could be equal to
1032 : // i.numRestarts, i.e., we are past the last restart. Since keys are strictly
1033 : // increasing, then the restart point at index-1 will be the first one that
1034 : // has some keys belonging to it that are less than the search key.
1035 : //
1036 : // Next, we will search between the restart at index-1 and the restart point
1037 : // at index, for the first key >= key, and then on finding it, return
1038 : // i.Prev(). We need to know when we have hit the offset for index, since then
1039 : // we can stop searching. targetOffset encodes that offset for index.
1040 1 : targetOffset := i.restarts
1041 1 : i.offset = decodeRestart(i.data[i.restarts+4*(index-1):])
1042 1 : if index < i.numRestarts {
1043 1 : targetOffset = decodeRestart(i.data[i.restarts+4*(index):])
1044 1 :
1045 1 : if i.transforms.SyntheticSuffix != nil {
1046 1 : // The binary search was conducted on keys without suffix replacement,
1047 1 : // implying the returned restart point (index) may be less than the search
1048 1 : // key, breaking the assumption described above.
1049 1 : //
1050 1 : // For example: consider this block with a replacement ts of 4, and a
1051 1 : // restart interval of 1: pre replacement a@3,b@2,c@3; post replacement
1052 1 : // a@4,b@4,c@4.
1053 1 : //
1054 1 : // Suppose the client calls SeekLT(b@3), SeekLT must return b@4.
1055 1 : //
1056 1 : // If the client calls SeekLT(b@3), the binary search would return b@2,
1057 1 : // the lowest key geq to b@3, pre-suffix replacement. Then, SeekLT will
1058 1 : // begin forward iteration from a@3, the previous restart point, to
1059 1 : // b{suffix}. The iteration stops when it encounters a key geq to the
1060 1 : // search key or if it reaches the upper bound. Without suffix
1061 1 : // replacement, we can assume that the upper bound of this forward
1062 1 : // iteration, b{suffix}, is greater than the search key, as implied by the
1063 1 : // binary search.
1064 1 : //
1065 1 : // If we naively hold this assumption with suffix replacement, the
1066 1 : // iteration would terminate at the upper bound, b@4, call i.Prev, and
1067 1 : // incorrectly return a@4. To correct for this, if the original returned
1068 1 : // index is less than the search key, shift our forward iteration to begin
1069 1 : // at index instead of index -1. With suffix replacement the key at index
1070 1 : // is guaranteed to be the highest restart point less than the search key
1071 1 : // (i.e. the same property of index-1 for a block without suffix
1072 1 : // replacement). This property holds because of the invariant that a block
1073 1 : // with suffix replacement will not have two keys that share the same
1074 1 : // prefix. To consider the above example, binary searching with b@3 landed
1075 1 : // naively at a@3, but since b@4<b@3, we shift our forward iteration to
1076 1 : // begin at b@4. We never need to shift by more than one restart point
1077 1 : // (i.e. to c@4) because it's impossible for the search key to be greater
1078 1 : // than the key at the next restart point in the block because that
1079 1 : // key will always have a different prefix. Put another way, because no
1080 1 : // key in the block shares the same prefix, naive binary search should
1081 1 : // always land at most 1 restart point off the correct one.
1082 1 :
1083 1 : naiveOffset := i.offset
1084 1 : // Shift up to the original binary search result and decode the key.
1085 1 : i.offset = targetOffset
1086 1 : i.readEntry()
1087 1 : i.decodeInternalKey(i.key)
1088 1 : i.maybeReplaceSuffix(false /* allowInPlace */)
1089 1 :
1090 1 : // If the binary search point is actually less than the search key, post
1091 1 : // replacement, bump the target offset.
1092 1 : if i.cmp(i.ikey.UserKey, key) < 0 {
1093 1 : i.offset = targetOffset
1094 1 : if index+1 < i.numRestarts {
1095 1 : // if index+1 is within the i.data bounds, use it to find the target
1096 1 : // offset.
1097 1 : targetOffset = decodeRestart(i.data[i.restarts+4*(index+1):])
1098 1 : } else {
1099 0 : targetOffset = i.restarts
1100 0 : }
1101 1 : } else {
1102 1 : i.offset = naiveOffset
1103 1 : }
1104 : }
1105 : }
1106 :
1107 : // Init nextOffset for the forward iteration below.
1108 1 : i.nextOffset = i.offset
1109 1 :
1110 1 : for {
1111 1 : i.offset = i.nextOffset
1112 1 : i.readEntry()
1113 1 : // When hidden keys are common, there is additional optimization possible
1114 1 : // by not caching entries that are hidden (note that some calls to
1115 1 : // cacheEntry don't decode the internal key before caching, but checking
1116 1 : // whether a key is hidden does not require full decoding). However, we do
1117 1 : // need to use the blockEntry.offset in the cache for the first entry at
1118 1 : // the reset point to do the binary search when the cache is empty -- so
1119 1 : // we would need to cache that first entry (though not the key) even if
1120 1 : // was hidden. Our current assumption is that if there are large numbers
1121 1 : // of hidden keys we will be able to skip whole blocks (using block
1122 1 : // property filters) so we don't bother optimizing.
1123 1 : hiddenPoint := i.decodeInternalKey(i.key)
1124 1 : i.maybeReplaceSuffix(false /*allowInPlace*/)
1125 1 :
1126 1 : // NB: we don't use the hiddenPoint return value of decodeInternalKey
1127 1 : // since we want to stop as soon as we reach a key >= ikey.UserKey, so
1128 1 : // that we can reverse.
1129 1 : if i.cmp(i.ikey.UserKey, key) >= 0 {
1130 1 : // The current key is greater than or equal to our search key. Back up to
1131 1 : // the previous key which was less than our search key. Note that this for
1132 1 : // loop will execute at least once with this if-block not being true, so
1133 1 : // the key we are backing up to is the last one this loop cached.
1134 1 : return i.Prev()
1135 1 : }
1136 :
1137 1 : if i.nextOffset >= targetOffset {
1138 1 : // We've reached the end of the current restart block. Return the
1139 1 : // current key if not hidden, else call Prev().
1140 1 : //
1141 1 : // When the restart interval is 1, the first iteration of the for loop
1142 1 : // will bring us here. In that case ikey is backed by the block so we
1143 1 : // get the desired key stability guarantee for the lifetime of the
1144 1 : // blockIter. That is, we never cache anything and therefore never
1145 1 : // return a key backed by cachedBuf.
1146 1 : if hiddenPoint {
1147 1 : return i.Prev()
1148 1 : }
1149 1 : break
1150 : }
1151 1 : i.cacheEntry()
1152 : }
1153 :
1154 1 : if !i.valid() {
1155 1 : return nil, base.LazyValue{}
1156 1 : }
1157 1 : if !i.lazyValueHandling.hasValuePrefix ||
1158 1 : base.TrailerKind(i.ikey.Trailer) != InternalKeyKindSet {
1159 1 : i.lazyValue = base.MakeInPlaceValue(i.val)
1160 1 : } else if i.lazyValueHandling.vbr == nil || !isValueHandle(valuePrefix(i.val[0])) {
1161 1 : i.lazyValue = base.MakeInPlaceValue(i.val[1:])
1162 1 : } else {
1163 1 : i.lazyValue = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val)
1164 1 : }
1165 1 : return &i.ikey, i.lazyValue
1166 : }
1167 :
1168 : // First implements internalIterator.First, as documented in the pebble
1169 : // package.
1170 1 : func (i *blockIter) First() (*InternalKey, base.LazyValue) {
1171 1 : if invariants.Enabled && i.isDataInvalidated() {
1172 0 : panic(errors.AssertionFailedf("invalidated blockIter used"))
1173 : }
1174 :
1175 1 : i.offset = 0
1176 1 : if !i.valid() {
1177 1 : return nil, base.LazyValue{}
1178 1 : }
1179 1 : i.clearCache()
1180 1 : i.readEntry()
1181 1 : hiddenPoint := i.decodeInternalKey(i.key)
1182 1 : if hiddenPoint {
1183 1 : return i.Next()
1184 1 : }
1185 1 : i.maybeReplaceSuffix(true /*allowInPlace*/)
1186 1 : if !i.lazyValueHandling.hasValuePrefix ||
1187 1 : base.TrailerKind(i.ikey.Trailer) != InternalKeyKindSet {
1188 1 : i.lazyValue = base.MakeInPlaceValue(i.val)
1189 1 : } else if i.lazyValueHandling.vbr == nil || !isValueHandle(valuePrefix(i.val[0])) {
1190 1 : i.lazyValue = base.MakeInPlaceValue(i.val[1:])
1191 1 : } else {
1192 1 : i.lazyValue = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val)
1193 1 : }
1194 1 : return &i.ikey, i.lazyValue
1195 : }
1196 :
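// decodeRestart reads a restart word, masking off the stolen
// setHasSameKeyPrefix bit (the read-side counterpart of
// setHasSameKeyPrefixRestartMask); e.g. the word 0x80000010 decodes to
// offset 16.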
1197 1 : func decodeRestart(b []byte) int32 {
1198 1 : _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
1199 1 : return int32(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 |
1200 1 : uint32(b[3]&restartMaskLittleEndianHighByteWithoutSetHasSamePrefix)<<24)
1201 1 : }
1202 :
1203 : // Last implements internalIterator.Last, as documented in the pebble package.
1204 1 : func (i *blockIter) Last() (*InternalKey, base.LazyValue) {
1205 1 : if invariants.Enabled && i.isDataInvalidated() {
1206 0 : panic(errors.AssertionFailedf("invalidated blockIter used"))
1207 : }
1208 :
1209 : // Seek forward from the last restart point.
1210 1 : i.offset = decodeRestart(i.data[i.restarts+4*(i.numRestarts-1):])
1211 1 : if !i.valid() {
1212 1 : return nil, base.LazyValue{}
1213 1 : }
1214 :
1215 1 : i.readEntry()
1216 1 : i.clearCache()
1217 1 :
1218 1 : for i.nextOffset < i.restarts {
1219 1 : i.cacheEntry()
1220 1 : i.offset = i.nextOffset
1221 1 : i.readEntry()
1222 1 : }
1223 :
1224 1 : hiddenPoint := i.decodeInternalKey(i.key)
1225 1 : if hiddenPoint {
1226 1 : return i.Prev()
1227 1 : }
1228 1 : i.maybeReplaceSuffix(false /*allowInPlace*/)
1229 1 : if !i.lazyValueHandling.hasValuePrefix ||
1230 1 : base.TrailerKind(i.ikey.Trailer) != InternalKeyKindSet {
1231 1 : i.lazyValue = base.MakeInPlaceValue(i.val)
1232 1 : } else if i.lazyValueHandling.vbr == nil || !isValueHandle(valuePrefix(i.val[0])) {
1233 1 : i.lazyValue = base.MakeInPlaceValue(i.val[1:])
1234 1 : } else {
1235 1 : i.lazyValue = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val)
1236 1 : }
1237 1 : return &i.ikey, i.lazyValue
1238 : }
1239 :
1240 : // Next implements internalIterator.Next, as documented in the pebble
1241 : // package.
1242 1 : func (i *blockIter) Next() (*InternalKey, base.LazyValue) {
1243 1 : if len(i.cachedBuf) > 0 {
1244 1 : // We're switching from reverse iteration to forward iteration. We need to
1245 1 : // populate i.fullKey with the current key we're positioned at so that
1246 1 : // readEntry() can use i.fullKey for key prefix decompression. Note that we
1247 1 : // don't know whether i.key is backed by i.cachedBuf or i.fullKey (if
1248 1 : // SeekLT was the previous call, i.key may be backed by i.fullKey), but
1249 1 : // copying into i.fullKey works for both cases.
1250 1 : //
1251 1 : // TODO(peter): Rather than clearing the cache, we could instead use the
1252 1 : // cache until it is exhausted. This would likely be faster than falling
1253 1 : // through to the normal forward iteration code below.
1254 1 : i.fullKey = append(i.fullKey[:0], i.key...)
1255 1 : i.clearCache()
1256 1 : }
1257 :
1258 : start:
1259 1 : i.offset = i.nextOffset
1260 1 : if !i.valid() {
1261 1 : return nil, base.LazyValue{}
1262 1 : }
1263 1 : i.readEntry()
1264 1 : // Manually inlined version of i.decodeInternalKey(i.key).
1265 1 : if n := len(i.key) - 8; n >= 0 {
1266 1 : trailer := binary.LittleEndian.Uint64(i.key[n:])
1267 1 : hiddenPoint := i.transforms.HideObsoletePoints &&
1268 1 : (trailer&trailerObsoleteBit != 0)
1269 1 : i.ikey.Trailer = trailer & trailerObsoleteMask
1270 1 : i.ikey.UserKey = i.key[:n:n]
1271 1 : if n := i.transforms.SyntheticSeqNum; n != 0 {
1272 1 : i.ikey.SetSeqNum(uint64(n))
1273 1 : }
1274 1 : if hiddenPoint {
1275 1 : goto start
1276 : }
1277 1 : if i.transforms.SyntheticSuffix != nil {
1278 1 : // Inlined version of i.maybeReplaceSuffix(true /* allowInPlace */)
1279 1 : prefixLen := i.split(i.ikey.UserKey)
1280 1 : if cap(i.ikey.UserKey) >= prefixLen+len(i.transforms.SyntheticSuffix) {
1281 1 : i.ikey.UserKey = append(i.ikey.UserKey[:prefixLen], i.transforms.SyntheticSuffix...)
1282 1 : } else {
1283 1 : i.synthSuffixBuf = append(i.synthSuffixBuf[:0], i.ikey.UserKey[:prefixLen]...)
1284 1 : i.synthSuffixBuf = append(i.synthSuffixBuf, i.transforms.SyntheticSuffix...)
1285 1 : i.ikey.UserKey = i.synthSuffixBuf
1286 1 : }
1287 : }
1288 0 : } else {
1289 0 : i.ikey.Trailer = uint64(InternalKeyKindInvalid)
1290 0 : i.ikey.UserKey = nil
1291 0 : }
1292 1 : if !i.lazyValueHandling.hasValuePrefix ||
1293 1 : base.TrailerKind(i.ikey.Trailer) != InternalKeyKindSet {
1294 1 : i.lazyValue = base.MakeInPlaceValue(i.val)
1295 1 : } else if i.lazyValueHandling.vbr == nil || !isValueHandle(valuePrefix(i.val[0])) {
1296 1 : i.lazyValue = base.MakeInPlaceValue(i.val[1:])
1297 1 : } else {
1298 1 : i.lazyValue = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val)
1299 1 : }
1300 1 : return &i.ikey, i.lazyValue
1301 : }
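// The "manually inlined version of i.decodeInternalKey" above unpacks the
// 8-byte little-endian trailer stored at the end of every encoded key. A
// minimal standalone sketch follows; decodeTrailerSketch is illustrative
// and not part of this package's API. The kind lives in the low byte and
// the sequence number in the upper 56 bits:
func decodeTrailerSketch(encodedKey []byte) (userKey []byte, seqNum uint64, kind InternalKeyKind, ok bool) {
	n := len(encodedKey) - 8
	if n < 0 {
		// Too short to hold a trailer; the real code marks the key invalid.
		return nil, 0, InternalKeyKindInvalid, false
	}
	trailer := binary.LittleEndian.Uint64(encodedKey[n:])
	// The real code additionally masks off the in-sstable obsolete bit
	// (trailerObsoleteMask) before exposing the trailer to callers.
	return encodedKey[:n:n], trailer >> 8, InternalKeyKind(trailer & 0xff), true
}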
1302 :
1303 : // NextPrefix implements (base.InternalIterator).NextPrefix.
1304 1 : func (i *blockIter) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) {
1305 1 : if i.lazyValueHandling.hasValuePrefix {
1306 1 : return i.nextPrefixV3(succKey)
1307 1 : }
1308 1 : const nextsBeforeSeek = 3
1309 1 : k, v := i.Next()
1310 1 : for j := 1; k != nil && i.cmp(k.UserKey, succKey) < 0; j++ {
1311 1 : if j >= nextsBeforeSeek {
1312 1 : return i.SeekGE(succKey, base.SeekGEFlagsNone)
1313 1 : }
1314 1 : k, v = i.Next()
1315 : }
1316 1 : return k, v
1317 : }
1318 :
1319 1 : func (i *blockIter) nextPrefixV3(succKey []byte) (*InternalKey, base.LazyValue) {
1320 1 : // Doing nexts that involve a key comparison can be expensive (and the cost
1321 1 : // depends on the key length), so we use the same threshold of 3 that we use
1322 1 : // for TableFormatPebblev2 in blockIter.NextPrefix above. The next fast path
1323 1 : // that looks at setHasSamePrefix takes ~5ns per key, which is ~150x faster
1324 1 : // than doing a SeekGE within the block, so we do this 16 times
1325 1 : // (~5ns*16=80ns), and then switch to looking at restarts. Doing the binary
1326 1 : // search for the restart consumes > 100ns. If the number of versions is >
1327 1 : // 17, we will increment nextFastCount to 17, then do a binary search, and
1328 1 : // on average need to find a key between two restarts, so another 8 steps
1329 1 : // corresponding to nextFastCount, for a mean total of 17 + 8 = 25 such
1330 1 : // steps.
1331 1 : //
1332 1 : // TODO(sumeer): use the configured restartInterval for the sstable when it
1333 1 : // was written (which we don't currently store) instead of the default value
1334 1 : // of 16.
1335 1 : const nextCmpThresholdBeforeSeek = 3
1336 1 : const nextFastThresholdBeforeRestarts = 16
1337 1 : nextCmpCount := 0
1338 1 : nextFastCount := 0
1339 1 : usedRestarts := false
1340 1 : // INVARIANT: blockIter is valid.
1341 1 : if invariants.Enabled && !i.valid() {
1342 0 : panic(errors.AssertionFailedf("nextPrefixV3 called on invalid blockIter"))
1343 : }
1344 1 : prevKeyIsSet := i.ikey.Kind() == InternalKeyKindSet
1345 1 : for {
1346 1 : i.offset = i.nextOffset
1347 1 : if !i.valid() {
1348 1 : return nil, base.LazyValue{}
1349 1 : }
1350 : // Need to decode the length integers, so we can compute nextOffset.
1351 1 : ptr := unsafe.Pointer(uintptr(i.ptr) + uintptr(i.offset))
1352 1 : // This is an ugly performance hack. Reading entries from blocks is one of
1353 1 : // the inner-most routines and decoding the 3 varints per-entry takes
1354 1 : // significant time. Neither go1.11 or go1.12 will inline decodeVarint for
1355 1 : // us, so we do it manually. This provides a 10-15% performance improvement
1356 1 : // on blockIter benchmarks on both go1.11 and go1.12.
1357 1 : //
1358 1 : // TODO(peter): remove this hack if go:inline is ever supported.
1359 1 :
1360 1 : // Decode the shared key length integer.
1361 1 : var shared uint32
1362 1 : if a := *((*uint8)(ptr)); a < 128 {
1363 1 : shared = uint32(a)
1364 1 : ptr = unsafe.Pointer(uintptr(ptr) + 1)
1365 1 : } else if a, b := a&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 1))); b < 128 {
1366 0 : shared = uint32(b)<<7 | uint32(a)
1367 0 : ptr = unsafe.Pointer(uintptr(ptr) + 2)
1368 0 : } else if b, c := b&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 2))); c < 128 {
1369 0 : shared = uint32(c)<<14 | uint32(b)<<7 | uint32(a)
1370 0 : ptr = unsafe.Pointer(uintptr(ptr) + 3)
1371 0 : } else if c, d := c&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 3))); d < 128 {
1372 0 : shared = uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
1373 0 : ptr = unsafe.Pointer(uintptr(ptr) + 4)
1374 0 : } else {
1375 0 : d, e := d&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 4)))
1376 0 : shared = uint32(e)<<28 | uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
1377 0 : ptr = unsafe.Pointer(uintptr(ptr) + 5)
1378 0 : }
1379 : // Decode the unshared key length integer.
1380 1 : var unshared uint32
1381 1 : if a := *((*uint8)(ptr)); a < 128 {
1382 1 : unshared = uint32(a)
1383 1 : ptr = unsafe.Pointer(uintptr(ptr) + 1)
1384 1 : } else if a, b := a&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 1))); b < 128 {
1385 0 : unshared = uint32(b)<<7 | uint32(a)
1386 0 : ptr = unsafe.Pointer(uintptr(ptr) + 2)
1387 0 : } else if b, c := b&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 2))); c < 128 {
1388 0 : unshared = uint32(c)<<14 | uint32(b)<<7 | uint32(a)
1389 0 : ptr = unsafe.Pointer(uintptr(ptr) + 3)
1390 0 : } else if c, d := c&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 3))); d < 128 {
1391 0 : unshared = uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
1392 0 : ptr = unsafe.Pointer(uintptr(ptr) + 4)
1393 0 : } else {
1394 0 : d, e := d&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 4)))
1395 0 : unshared = uint32(e)<<28 | uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
1396 0 : ptr = unsafe.Pointer(uintptr(ptr) + 5)
1397 0 : }
1398 : // Decode the value length integer.
1399 1 : var value uint32
1400 1 : if a := *((*uint8)(ptr)); a < 128 {
1401 1 : value = uint32(a)
1402 1 : ptr = unsafe.Pointer(uintptr(ptr) + 1)
1403 1 : } else if a, b := a&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 1))); b < 128 {
1404 0 : value = uint32(b)<<7 | uint32(a)
1405 0 : ptr = unsafe.Pointer(uintptr(ptr) + 2)
1406 0 : } else if b, c := b&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 2))); c < 128 {
1407 0 : value = uint32(c)<<14 | uint32(b)<<7 | uint32(a)
1408 0 : ptr = unsafe.Pointer(uintptr(ptr) + 3)
1409 0 : } else if c, d := c&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 3))); d < 128 {
1410 0 : value = uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
1411 0 : ptr = unsafe.Pointer(uintptr(ptr) + 4)
1412 0 : } else {
1413 0 : d, e := d&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 4)))
1414 0 : value = uint32(e)<<28 | uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
1415 0 : ptr = unsafe.Pointer(uintptr(ptr) + 5)
1416 0 : }
1417 : // The starting position of the value.
1418 1 : valuePtr := unsafe.Pointer(uintptr(ptr) + uintptr(unshared))
1419 1 : i.nextOffset = int32(uintptr(valuePtr)-uintptr(i.ptr)) + int32(value)
1420 1 : if invariants.Enabled && unshared < 8 {
1421 0 : // This should not happen since only the key prefix is shared, so even
1422 0 : // if the prefix length equals the user key length, the unshared portion
1423 0 : // must still include the 8-byte trailer.
1424 0 : panic(errors.AssertionFailedf("unshared %d is too small", unshared))
1425 : }
1426 : // The trailer is written in little endian, so the key kind is the first
1427 : // byte of the trailer, which is encoded in the slice [unshared-8:unshared].
1428 1 : keyKind := InternalKeyKind((*[manual.MaxArrayLen]byte)(ptr)[unshared-8])
1429 1 : keyKind = keyKind & base.InternalKeyKindSSTableInternalObsoleteMask
1430 1 : prefixChanged := false
1431 1 : if keyKind == InternalKeyKindSet {
1432 1 : if invariants.Enabled && value == 0 {
1433 0 : panic(errors.AssertionFailedf("value is of length 0, but we expect a valuePrefix"))
1434 : }
1435 1 : valPrefix := *((*valuePrefix)(valuePtr))
1436 1 : if setHasSamePrefix(valPrefix) {
1437 1 : // Fast-path. No need to assemble i.fullKey, or update i.key. We know
1438 1 : // that subsequent keys will not have a shared length that is greater
1439 1 : // than the prefix of the current key, which is also the prefix of
1440 1 : // i.key. Since we are continuing to iterate, we don't need to
1441 1 : // initialize i.ikey and i.lazyValue (these are initialized before
1442 1 : // returning).
1443 1 : nextFastCount++
1444 1 : if nextFastCount > nextFastThresholdBeforeRestarts {
1445 0 : if usedRestarts {
1446 0 : // Exhausted iteration budget. This will never happen unless
1447 0 : // someone is using a restart interval > 16. It is just to guard
1448 0 : // against long restart intervals causing too much iteration.
1449 0 : break
1450 : }
1451 : // Haven't used restarts yet, so find the first restart at or beyond
1452 : // the current offset.
1453 0 : targetOffset := i.offset
1454 0 : var index int32
1455 0 : {
1456 0 : // NB: manually inlined sort.Sort is ~5% faster.
1457 0 : //
1458 0 : // f defined for a restart point is true iff the offset >=
1459 0 : // targetOffset.
1460 0 : // Define f(-1) == false and f(i.numRestarts) == true.
1461 0 : // Invariant: f(index-1) == false, f(upper) == true.
1462 0 : upper := i.numRestarts
1463 0 : for index < upper {
1464 0 : h := int32(uint(index+upper) >> 1) // avoid overflow when computing h
1465 0 : // index ≤ h < upper
1466 0 : offset := decodeRestart(i.data[i.restarts+4*h:])
1467 0 : if offset < targetOffset {
1468 0 : index = h + 1 // preserves f(index-1) == false
1469 0 : } else {
1470 0 : upper = h // preserves f(upper) == true
1471 0 : }
1472 : }
1473 : // index == upper, f(index-1) == false, and f(upper) (= f(index)) == true
1474 : // => answer is index.
1475 : }
1476 0 : usedRestarts = true
1477 0 : nextFastCount = 0
1478 0 : if index == i.numRestarts {
1479 0 : // Already past the last real restart, so iterate a bit more until
1480 0 : // we are done with the block.
1481 0 : continue
1482 : }
1483 : // Have some real restarts after index. NB: index is the first
1484 : // restart at or beyond the current offset.
1485 0 : startingIndex := index
1486 0 : for index != i.numRestarts &&
1487 0 : // The restart at index is 4 bytes written in little endian format
1488 0 : // starting at i.restart+4*index. The 0th byte is the least
1489 0 : // significant and the 3rd byte is the most significant. Since the
1490 0 : // most significant bit of the 3rd byte is what we use for
1491 0 : // encoding the set-has-same-prefix information, the indexing
1492 0 : // below has +3.
1493 0 : i.data[i.restarts+4*index+3]&restartMaskLittleEndianHighByteOnlySetHasSamePrefix != 0 {
1494 0 : // We still have the same prefix, so move to the next restart.
1495 0 : index++
1496 0 : }
1497 : // index is the first restart that did not have the same prefix.
1498 0 : if index != startingIndex {
1499 0 : // Managed to skip past at least one restart. Resume iteration
1500 0 : // from index-1. Since nextFastCount has been reset to 0, we
1501 0 : // should be able to iterate to the next prefix.
1502 0 : i.offset = decodeRestart(i.data[i.restarts+4*(index-1):])
1503 0 : i.readEntry()
1504 0 : }
1505 : // Else, unable to skip past any restart. Resume iteration. Since
1506 : // nextFastCount has been reset to 0, we should be able to iterate
1507 : // to the next prefix.
1508 0 : continue
1509 : }
1510 1 : continue
1511 1 : } else if prevKeyIsSet {
1512 1 : prefixChanged = true
1513 1 : }
1514 1 : } else {
1515 1 : prevKeyIsSet = false
1516 1 : }
1517 : // Slow-path cases:
1518 : // - (Likely) The prefix has changed.
1519 : // - (Unlikely) The prefix has not changed.
1520 : // We assemble the key etc. under the assumption that it is the likely
1521 : // case.
1522 1 : unsharedKey := getBytes(ptr, int(unshared))
1523 1 : // TODO(sumeer): move this into the else block below. This is a bit tricky
1524 1 : // since the current logic assumes we have always copied the latest key
1525 1 : // into fullKey, which is why when we get to the next key we can (a)
1526 1 : // access i.fullKey[:shared], (b) append only the unsharedKey to
1527 1 : // i.fullKey. For (a), we can access i.key[:shared] since that memory is
1528 1 : // valid (even if unshared). For (b), we will need to remember whether
1529 1 : // i.key refers to i.fullKey or not, and can append the unsharedKey only
1530 1 : // in the former case and for the latter case need to copy the shared part
1531 1 : // too. This same comment applies to the other place where we can do this
1532 1 : // optimization, in readEntry().
1533 1 : i.fullKey = append(i.fullKey[:shared], unsharedKey...)
1534 1 : i.val = getBytes(valuePtr, int(value))
1535 1 : if shared == 0 {
1536 1 : // Provide stability for the key across positioning calls if the key
1537 1 : // doesn't share a prefix with the previous key. This avoids requiring the
1538 1 : // caller to copy the key if it knows the block has a restart interval of
1539 1 : // 1. An important example of this is range-del blocks.
1540 1 : i.key = unsharedKey
1541 1 : } else {
1542 1 : i.key = i.fullKey
1543 1 : }
1544 : // Manually inlined version of i.decodeInternalKey(i.key).
1545 1 : hiddenPoint := false
1546 1 : if n := len(i.key) - 8; n >= 0 {
1547 1 : trailer := binary.LittleEndian.Uint64(i.key[n:])
1548 1 : hiddenPoint = i.transforms.HideObsoletePoints &&
1549 1 : (trailer&trailerObsoleteBit != 0)
1550 1 : i.ikey.Trailer = trailer & trailerObsoleteMask
1551 1 : i.ikey.UserKey = i.key[:n:n]
1552 1 : if n := i.transforms.SyntheticSeqNum; n != 0 {
1553 1 : i.ikey.SetSeqNum(uint64(n))
1554 1 : }
1555 1 : if i.transforms.SyntheticSuffix != nil {
1556 0 : // Inlined version of i.maybeReplaceSuffix(true /* allowInPlace */)
1557 0 : prefixLen := i.split(i.ikey.UserKey)
1558 0 : if cap(i.ikey.UserKey) >= prefixLen+len(i.transforms.SyntheticSuffix) {
1559 0 : i.ikey.UserKey = append(i.ikey.UserKey[:prefixLen], i.transforms.SyntheticSuffix...)
1560 0 : } else {
1561 0 : i.synthSuffixBuf = append(i.synthSuffixBuf[:0], i.ikey.UserKey[:prefixLen]...)
1562 0 : i.synthSuffixBuf = append(i.synthSuffixBuf, i.transforms.SyntheticSuffix...)
1563 0 : i.ikey.UserKey = i.synthSuffixBuf
1564 0 : }
1565 : }
1566 0 : } else {
1567 0 : i.ikey.Trailer = uint64(InternalKeyKindInvalid)
1568 0 : i.ikey.UserKey = nil
1569 0 : }
1570 1 : nextCmpCount++
1571 1 : if invariants.Enabled && prefixChanged && i.cmp(i.ikey.UserKey, succKey) < 0 {
1572 0 : panic(errors.AssertionFailedf("prefix should have changed but %x < %x",
1573 0 : i.ikey.UserKey, succKey))
1574 : }
1575 1 : if prefixChanged || i.cmp(i.ikey.UserKey, succKey) >= 0 {
1576 1 : // Prefix has changed.
1577 1 : if hiddenPoint {
1578 1 : return i.Next()
1579 1 : }
1580 1 : if invariants.Enabled && !i.lazyValueHandling.hasValuePrefix {
1581 0 : panic(errors.AssertionFailedf("nextPrefixV3 being run for non-v3 sstable"))
1582 : }
1583 1 : if base.TrailerKind(i.ikey.Trailer) != InternalKeyKindSet {
1584 1 : i.lazyValue = base.MakeInPlaceValue(i.val)
1585 1 : } else if i.lazyValueHandling.vbr == nil || !isValueHandle(valuePrefix(i.val[0])) {
1586 1 : i.lazyValue = base.MakeInPlaceValue(i.val[1:])
1587 1 : } else {
1588 0 : i.lazyValue = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val)
1589 0 : }
1590 1 : return &i.ikey, i.lazyValue
1591 : }
1592 : // Else prefix has not changed.
1593 :
1594 1 : if nextCmpCount >= nextCmpThresholdBeforeSeek {
1595 1 : break
1596 : }
1597 : }
1598 1 : return i.SeekGE(succKey, base.SeekGEFlagsNone)
1599 : }
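// Each of the three manually inlined varint decoders in nextPrefixV3 is
// equivalent to this safe sketch (illustrative only: the hot path avoids
// the function call and the per-byte bounds checks, and binary.Uvarint is
// not used because it returns a uint64 and is not inlined):
func decodeVarint32Sketch(b []byte) (v uint32, n int) {
	var shift uint
	for ; n < len(b); n++ {
		c := b[n]
		v |= uint32(c&0x7f) << shift
		if c < 128 {
			// High bit clear: this is the final byte of the varint.
			return v, n + 1
		}
		shift += 7
	}
	// Malformed input: the varint ran off the end of the buffer.
	return 0, 0
}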
1600 :
1601 : // Prev implements internalIterator.Prev, as documented in the pebble
1602 : // package.
1603 1 : func (i *blockIter) Prev() (*InternalKey, base.LazyValue) {
1604 1 : start:
1605 1 : for n := len(i.cached) - 1; n >= 0; n-- {
1606 1 : i.nextOffset = i.offset
1607 1 : e := &i.cached[n]
1608 1 : i.offset = e.offset
1609 1 : i.val = getBytes(unsafe.Pointer(uintptr(i.ptr)+uintptr(e.valStart)), int(e.valSize))
1610 1 : // Manually inlined version of i.decodeInternalKey(i.key).
1611 1 : i.key = i.cachedBuf[e.keyStart:e.keyEnd]
1612 1 : if n := len(i.key) - 8; n >= 0 {
1613 1 : trailer := binary.LittleEndian.Uint64(i.key[n:])
1614 1 : hiddenPoint := i.transforms.HideObsoletePoints &&
1615 1 : (trailer&trailerObsoleteBit != 0)
1616 1 : if hiddenPoint {
1617 1 : continue
1618 : }
1619 1 : i.ikey.Trailer = trailer & trailerObsoleteMask
1620 1 : i.ikey.UserKey = i.key[:n:n]
1621 1 : if n := i.transforms.SyntheticSeqNum; n != 0 {
1622 1 : i.ikey.SetSeqNum(uint64(n))
1623 1 : }
1624 1 : if i.transforms.SyntheticSuffix != nil {
1625 1 : // Inlined version of i.maybeReplaceSuffix(false /* allowInPlace */)
1626 1 : prefixLen := i.split(i.ikey.UserKey)
1627 1 : // If ikey is cached or may get cached, we must copy UserKey into a
1628 1 : // separate buffer before suffix replacement.
1629 1 : i.synthSuffixBuf = append(i.synthSuffixBuf[:0], i.ikey.UserKey[:prefixLen]...)
1630 1 : i.synthSuffixBuf = append(i.synthSuffixBuf, i.transforms.SyntheticSuffix...)
1631 1 : i.ikey.UserKey = i.synthSuffixBuf
1632 1 : }
1633 0 : } else {
1634 0 : i.ikey.Trailer = uint64(InternalKeyKindInvalid)
1635 0 : i.ikey.UserKey = nil
1636 0 : }
1637 1 : i.cached = i.cached[:n]
1638 1 : if !i.lazyValueHandling.hasValuePrefix ||
1639 1 : base.TrailerKind(i.ikey.Trailer) != InternalKeyKindSet {
1640 1 : i.lazyValue = base.MakeInPlaceValue(i.val)
1641 1 : } else if i.lazyValueHandling.vbr == nil || !isValueHandle(valuePrefix(i.val[0])) {
1642 1 : i.lazyValue = base.MakeInPlaceValue(i.val[1:])
1643 1 : } else {
1644 1 : i.lazyValue = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val)
1645 1 : }
1646 1 : return &i.ikey, i.lazyValue
1647 : }
1648 :
1649 1 : i.clearCache()
1650 1 : if i.offset <= 0 {
1651 1 : i.offset = -1
1652 1 : i.nextOffset = 0
1653 1 : return nil, base.LazyValue{}
1654 1 : }
1655 :
1656 1 : targetOffset := i.offset
1657 1 : var index int32
1658 1 :
1659 1 : {
1660 1 : // NB: manually inlined sort.Sort is ~5% faster.
1661 1 : //
1662 1 : // Define f(-1) == false and f(i.numRestarts) == true.
1663 1 : // Invariant: f(index-1) == false, f(upper) == true.
1664 1 : upper := i.numRestarts
1665 1 : for index < upper {
1666 1 : h := int32(uint(index+upper) >> 1) // avoid overflow when computing h
1667 1 : // index ≤ h < upper
1668 1 : offset := decodeRestart(i.data[i.restarts+4*h:])
1669 1 : if offset < targetOffset {
1670 1 : // Looking for the first restart that has offset >= targetOffset, so
1671 1 : // ignore h and earlier.
1672 1 : index = h + 1 // preserves f(index-1) == false
1673 1 : } else {
1674 1 : upper = h // preserves f(upper) == true
1675 1 : }
1676 : }
1677 : // index == upper, f(index-1) == false, and f(upper) (= f(index)) == true
1678 : // => answer is index.
1679 : }
1680 :
1681 : // index is the first restart with offset >= targetOffset. Note that
1682 : // targetOffset may not be at a restart point since one can call Prev()
1683 : // after Next() (so the cache was not populated) and targetOffset refers to
1684 : // the current entry. index-1 must have an offset < targetOffset (it can't
1685 : // be equal to targetOffset since the binary search would have selected that
1686 : // as the index).
1687 1 : i.offset = 0
1688 1 : if index > 0 {
1689 1 : i.offset = decodeRestart(i.data[i.restarts+4*(index-1):])
1690 1 : }
1691 : // TODO(sumeer): why is the else case not an error, given that
1692 : // targetOffset is a valid offset?
1693 :
1694 1 : i.readEntry()
1695 1 :
1696 1 : // We stop when i.nextOffset == targetOffset since the targetOffset is the
1697 1 : // entry we are stepping back from, and we don't need to cache the entry
1698 1 : // before it, since it is the candidate to return.
1699 1 : for i.nextOffset < targetOffset {
1700 1 : i.cacheEntry()
1701 1 : i.offset = i.nextOffset
1702 1 : i.readEntry()
1703 1 : }
1704 :
1705 1 : hiddenPoint := i.decodeInternalKey(i.key)
1706 1 : if hiddenPoint {
1707 1 : // Use the cache.
1708 1 : goto start
1709 : }
1710 1 : if i.transforms.SyntheticSuffix != nil {
1711 1 : // Inlined version of i.maybeReplaceSuffix(false /* allowInPlace */)
1712 1 : prefixLen := i.split(i.ikey.UserKey)
1713 1 : // If ikey is cached or may get cached, we must copy UserKey into a
1714 1 : // separate buffer before suffix replacement.
1715 1 : i.synthSuffixBuf = append(i.synthSuffixBuf[:0], i.ikey.UserKey[:prefixLen]...)
1716 1 : i.synthSuffixBuf = append(i.synthSuffixBuf, i.transforms.SyntheticSuffix...)
1717 1 : i.ikey.UserKey = i.synthSuffixBuf
1718 1 : }
1719 1 : if !i.lazyValueHandling.hasValuePrefix ||
1720 1 : base.TrailerKind(i.ikey.Trailer) != InternalKeyKindSet {
1721 1 : i.lazyValue = base.MakeInPlaceValue(i.val)
1722 1 : } else if i.lazyValueHandling.vbr == nil || !isValueHandle(valuePrefix(i.val[0])) {
1723 1 : i.lazyValue = base.MakeInPlaceValue(i.val[1:])
1724 1 : } else {
1725 1 : i.lazyValue = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val)
1726 1 : }
1727 1 : return &i.ikey, i.lazyValue
1728 : }
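// The manually inlined binary search in Prev (and the copy of it in
// nextPrefixV3) computes the same index as this sort.Search formulation,
// which is kept out of the hot path purely for speed (~5% per the comments
// above). A sketch, assuming the sort package were imported:
//
//	index := int32(sort.Search(int(i.numRestarts), func(j int) bool {
//		return decodeRestart(i.data[i.restarts+4*int32(j):]) >= targetOffset
//	}))
//
// That is, index is the smallest restart whose offset is >= targetOffset,
// with index == i.numRestarts when no such restart exists.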
1729 :
1730 : // Key implements internalIterator.Key, as documented in the pebble package.
1731 1 : func (i *blockIter) Key() *InternalKey {
1732 1 : return &i.ikey
1733 1 : }
1734 :
1735 1 : func (i *blockIter) value() base.LazyValue {
1736 1 : return i.lazyValue
1737 1 : }
1738 :
1739 : // Error implements internalIterator.Error, as documented in the pebble
1740 : // package.
1741 1 : func (i *blockIter) Error() error {
1742 1 : return nil // infallible
1743 1 : }
1744 :
1745 : // Close implements internalIterator.Close, as documented in the pebble
1746 : // package.
1747 1 : func (i *blockIter) Close() error {
1748 1 : i.handle.Release()
1749 1 : i.handle = bufferHandle{}
1750 1 : i.val = nil
1751 1 : i.lazyValue = base.LazyValue{}
1752 1 : i.lazyValueHandling.vbr = nil
1753 1 : return nil
1754 1 : }
1755 :
1756 0 : func (i *blockIter) SetBounds(lower, upper []byte) {
1757 0 : // This should never be called as bounds are handled by sstable.Iterator.
1758 0 : panic("pebble: SetBounds unimplemented")
1759 : }
1760 :
1761 0 : func (i *blockIter) SetContext(_ context.Context) {}
1762 :
1763 1 : func (i *blockIter) valid() bool {
1764 1 : return i.offset >= 0 && i.offset < i.restarts
1765 1 : }
1766 :
1767 : // fragmentBlockIter wraps a blockIter, implementing the
1768 : // keyspan.FragmentIterator interface. It's used for reading range deletion and
1769 : // range key blocks.
1770 : //
1771 : // Range deletions and range keys are fragmented before they're persisted to the
1772 : // block. Overlapping fragments have identical bounds. The fragmentBlockIter
1773 : // gathers all the fragments with identical bounds within a block and returns a
1774 : // single keyspan.Span describing all the keys defined over the span.
1775 : //
1776 : // # Memory lifetime
1777 : //
1778 : // A Span returned by fragmentBlockIter is only guaranteed to be stable until
1779 : // the next fragmentBlockIter iteration positioning method. A Span's Keys slice
1780 : // may be reused, so the user must not assume it's stable.
1781 : //
1782 : // Blocks holding range deletions and range keys are configured to use a restart
1783 : // interval of 1. This provides key stability. The caller may treat the various
1784 : // byte slices (start, end, suffix, value) as stable for the lifetime of the
1785 : // iterator.
1786 : type fragmentBlockIter struct {
1787 : blockIter blockIter
1788 : keyBuf [2]keyspan.Key
1789 : span keyspan.Span
1790 : dir int8
1791 : closeHook func(i keyspan.FragmentIterator) error
1792 :
1793 : // elideSameSeqnum, if true, causes the iterator to return only the
1794 : // first-occurring (in forward order) Key for each sequence number.
1795 : elideSameSeqnum bool
1796 : }
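// As an illustrative example of the gathering described above: a range key
// block holding the fragments a-c#5,RANGEKEYSET and a-c#3,RANGEKEYSET
// (identical [a,c) bounds, distinct seqnums) is surfaced as a single
// keyspan.Span{Start: a, End: c} whose Keys slice holds both #5 and #3.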
1797 :
1798 1 : func (i *fragmentBlockIter) resetForReuse() fragmentBlockIter {
1799 1 : return fragmentBlockIter{blockIter: i.blockIter.resetForReuse()}
1800 1 : }
1801 :
1802 1 : func (i *fragmentBlockIter) decodeSpanKeys(k *InternalKey, internalValue []byte) error {
1803 1 : // TODO(jackson): The use of i.span.Keys to accumulate keys across multiple
1804 1 : // calls to Decode is too confusing and subtle. Refactor to make it
1805 1 : // explicit.
1806 1 :
1807 1 : // Decode the contents of the fragment's value. This always includes at
1808 1 : // least the end key: RANGEDELs store the end key directly as the value,
1809 1 : // whereas the various range key kinds use a more complicated encoding. The
1810 1 : // details of the range key internal value format are documented within the
1811 1 : // internal/rangekey package.
1812 1 : var err error
1813 1 : switch k.Kind() {
1814 1 : case base.InternalKeyKindRangeDelete:
1815 1 : i.span = rangedel.Decode(*k, internalValue, i.span.Keys)
1816 1 : case base.InternalKeyKindRangeKeySet, base.InternalKeyKindRangeKeyUnset, base.InternalKeyKindRangeKeyDelete:
1817 1 : i.span, err = rangekey.Decode(*k, internalValue, i.span.Keys)
1818 0 : default:
1819 0 : i.span = keyspan.Span{}
1820 0 : err = base.CorruptionErrorf("pebble: corrupt keyspan fragment of kind %d", k.Kind())
1821 : }
1822 1 : return err
1823 : }
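// For example, a RANGEDEL covering [a, c) at seqnum 5 is persisted as the
// internal key a#5,RANGEDEL with value c, so rangedel.Decode recovers
// Span{Start: a, End: c} with a single #5,RANGEDEL key. The range key kinds
// additionally encode suffixes and values after the end key (see
// internal/rangekey).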
1824 :
1825 1 : func (i *fragmentBlockIter) elideKeysOfSameSeqNum() {
1826 1 : if invariants.Enabled {
1827 1 : if !i.elideSameSeqnum || len(i.span.Keys) == 0 {
1828 0 : panic("elideKeysOfSameSeqNum called when it should not be")
1829 : }
1830 : }
1831 1 : lastSeqNum := i.span.Keys[0].SeqNum()
1832 1 : k := 1
1833 1 : for j := 1; j < len(i.span.Keys); j++ {
1834 1 : if lastSeqNum != i.span.Keys[j].SeqNum() {
1835 1 : lastSeqNum = i.span.Keys[j].SeqNum()
1836 1 : i.span.Keys[k] = i.span.Keys[j]
1837 1 : k++
1838 1 : }
1839 : }
1840 1 : i.span.Keys = i.span.Keys[:k]
1841 : }
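// An illustrative example: with elideSameSeqnum set, gathered keys with
// seqnums [5, 5, 3] collapse to [5, 3]; for each run of equal sequence
// numbers, only the first-occurring key survives.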
1842 :
1843 : // gatherForward gathers internal keys with identical bounds. Keys defined over
1844 : // spans of the keyspace are fragmented such that any overlapping key spans have
1845 : // identical bounds. When these spans are persisted to a range deletion or range
1846 : // key block, they may be persisted as multiple internal keys in order to encode
1847 : // multiple sequence numbers or key kinds.
1848 : //
1849 : // gatherForward iterates forward, re-combining the fragmented internal keys to
1850 : // reconstruct a keyspan.Span that holds all the keys defined over the span.
1851 : func (i *fragmentBlockIter) gatherForward(
1852 : k *InternalKey, lazyValue base.LazyValue,
1853 1 : ) (*keyspan.Span, error) {
1854 1 : i.span = keyspan.Span{}
1855 1 : if k == nil || !i.blockIter.valid() {
1856 1 : return nil, nil
1857 1 : }
1858 : // Use the i.keyBuf array to back the Keys slice to prevent an allocation
1859 : // when a span contains few keys.
1860 1 : i.span.Keys = i.keyBuf[:0]
1861 1 :
1862 1 : // Decode the span's end key and individual keys from the value.
1863 1 : internalValue := lazyValue.InPlaceValue()
1864 1 : if err := i.decodeSpanKeys(k, internalValue); err != nil {
1865 0 : return nil, err
1866 0 : }
1867 1 : prevEnd := i.span.End
1868 1 :
1869 1 : // There might exist additional internal keys with identical bounds encoded
1870 1 : // within the block. Iterate forward, accumulating all the keys with
1871 1 : // identical bounds into i.span.
1872 1 : k, lazyValue = i.blockIter.Next()
1873 1 : internalValue = lazyValue.InPlaceValue()
1874 1 : for k != nil && i.blockIter.cmp(k.UserKey, i.span.Start) == 0 {
1875 1 : if err := i.decodeSpanKeys(k, internalValue); err != nil {
1876 0 : return nil, err
1877 0 : }
1878 :
1879 : // Since k indicates an equal start key, the encoded end key must
1880 : // exactly equal the original end key from the first internal key.
1881 : // Overlapping fragments are required to have exactly equal start and
1882 : // end bounds.
1883 1 : if i.blockIter.cmp(prevEnd, i.span.End) != 0 {
1884 0 : i.span = keyspan.Span{}
1885 0 : return nil, base.CorruptionErrorf("pebble: corrupt keyspan fragmentation")
1886 0 : }
1887 1 : k, lazyValue = i.blockIter.Next()
1888 1 : internalValue = lazyValue.InPlaceValue()
1889 : }
1890 1 : if i.elideSameSeqnum && len(i.span.Keys) > 0 {
1891 1 : i.elideKeysOfSameSeqNum()
1892 1 : }
1893 : // i.blockIter is positioned over the first internal key for the next span.
1894 1 : return &i.span, nil
1895 : }
1896 :
1897 : // gatherBackward gathers internal keys with identical bounds. Keys defined over
1898 : // spans of the keyspace are fragmented such that any overlapping key spans have
1899 : // identical bounds. When these spans are persisted to a range deletion or range
1900 : // key block, they may be persisted as multiple internal keys in order to encode
1901 : // multiple sequence numbers or key kinds.
1902 : //
1903 : // gatherBackward iterates backwards, re-combining the fragmented internal keys
1904 : // to reconstruct a keyspan.Span that holds all the keys defined over the span.
1905 : func (i *fragmentBlockIter) gatherBackward(
1906 : k *InternalKey, lazyValue base.LazyValue,
1907 1 : ) (*keyspan.Span, error) {
1908 1 : i.span = keyspan.Span{}
1909 1 : if k == nil || !i.blockIter.valid() {
1910 1 : return nil, nil
1911 1 : }
1912 : // Use the i.keyBuf array to back the Keys slice to prevent an allocation
1913 : // when a span contains few keys.
1914 1 : i.span.Keys = i.keyBuf[:0]
1915 1 :
1916 1 : // Decode the span's end key and individual keys from the value.
1917 1 : internalValue := lazyValue.InPlaceValue()
1918 1 : if err := i.decodeSpanKeys(k, internalValue); err != nil {
1919 0 : return nil, err
1920 0 : }
1921 1 : prevEnd := i.span.End
1922 1 :
1923 1 : // There might exist additional internal keys with identical bounds encoded
1924 1 : // within the block. Iterate backward, accumulating all the keys with
1925 1 : // identical bounds into i.span.
1926 1 : k, lazyValue = i.blockIter.Prev()
1927 1 : internalValue = lazyValue.InPlaceValue()
1928 1 : for k != nil && i.blockIter.cmp(k.UserKey, i.span.Start) == 0 {
1929 1 : if err := i.decodeSpanKeys(k, internalValue); err != nil {
1930 0 : return nil, err
1931 0 : }
1932 :
1933 : // Since k indicates an equal start key, the encoded end key must
1934 : // exactly equal the original end key from the first internal key.
1935 : // Overlapping fragments are required to have exactly equal start and
1936 : // end bounds.
1937 1 : if i.blockIter.cmp(prevEnd, i.span.End) != 0 {
1938 0 : i.span = keyspan.Span{}
1939 0 : return nil, base.CorruptionErrorf("pebble: corrupt keyspan fragmentation")
1940 0 : }
1941 1 : k, lazyValue = i.blockIter.Prev()
1942 1 : internalValue = lazyValue.InPlaceValue()
1943 : }
1944 : // i.blockIter is positioned over the last internal key for the previous
1945 : // span.
1946 :
1947 : // Backwards iteration encounters internal keys in the wrong order.
1948 1 : keyspan.SortKeysByTrailer(&i.span.Keys)
1949 1 :
1950 1 : if i.elideSameSeqnum && len(i.span.Keys) > 0 {
1951 1 : i.elideKeysOfSameSeqNum()
1952 1 : }
1953 1 : return &i.span, nil
1954 : }
1955 :
1956 : // Close implements (keyspan.FragmentIterator).Close.
1957 1 : func (i *fragmentBlockIter) Close() error {
1958 1 : var err error
1959 1 : if i.closeHook != nil {
1960 0 : err = i.closeHook(i)
1961 0 : }
1962 1 : err = firstError(err, i.blockIter.Close())
1963 1 : return err
1964 : }
1965 :
1966 : // First implements (keyspan.FragmentIterator).First
1967 1 : func (i *fragmentBlockIter) First() (*keyspan.Span, error) {
1968 1 : i.dir = +1
1969 1 : return i.gatherForward(i.blockIter.First())
1970 1 : }
1971 :
1972 : // Last implements (keyspan.FragmentIterator).Last.
1973 1 : func (i *fragmentBlockIter) Last() (*keyspan.Span, error) {
1974 1 : i.dir = -1
1975 1 : return i.gatherBackward(i.blockIter.Last())
1976 1 : }
1977 :
1978 : // Next implements (keyspan.FragmentIterator).Next.
1979 1 : func (i *fragmentBlockIter) Next() (*keyspan.Span, error) {
1980 1 : switch {
1981 1 : case i.dir == -1 && !i.span.Valid():
1982 1 : // Switching directions.
1983 1 : //
1984 1 : // i.blockIter is exhausted, before the first key. Move onto the first.
1985 1 : i.blockIter.First()
1986 1 : i.dir = +1
1987 1 : case i.dir == -1 && i.span.Valid():
1988 1 : // Switching directions.
1989 1 : //
1990 1 : // i.blockIter is currently positioned over the last internal key for
1991 1 : // the previous span. Next it once to move to the first internal key
1992 1 : // that makes up the current span, and gatherForward to land on the
1993 1 : // first internal key making up the next span.
1994 1 : //
1995 1 : // In the diagram below, if the last span returned to the user during
1996 1 : // reverse iteration was [b,c), i.blockIter is currently positioned at
1997 1 : // [a,b). The block iter must be positioned over [d,e) to gather the
1998 1 : // next span's fragments.
1999 1 : //
2000 1 : // ... [a,b) [b,c) [b,c) [b,c) [d,e) ...
2001 1 : // ^ ^
2002 1 : // i.blockIter want
2003 1 : if x, err := i.gatherForward(i.blockIter.Next()); err != nil {
2004 0 : return nil, err
2005 1 : } else if invariants.Enabled && !x.Valid() {
2006 0 : panic("pebble: invariant violation: next entry unexpectedly invalid")
2007 : }
2008 1 : i.dir = +1
2009 : }
2010 : // We know that this blockIter has in-place values.
2011 1 : return i.gatherForward(&i.blockIter.ikey, base.MakeInPlaceValue(i.blockIter.val))
2012 : }
2013 :
2014 : // Prev implements (keyspan.FragmentIterator).Prev.
2015 1 : func (i *fragmentBlockIter) Prev() (*keyspan.Span, error) {
2016 1 : switch {
2017 1 : case i.dir == +1 && !i.span.Valid():
2018 1 : // Switching directions.
2019 1 : //
2020 1 : // i.blockIter is exhausted, after the last key. Move onto the last.
2021 1 : i.blockIter.Last()
2022 1 : i.dir = -1
2023 1 : case i.dir == +1 && i.span.Valid():
2024 1 : // Switching directions.
2025 1 : //
2026 1 : // i.blockIter is currently positioned over the first internal key for
2027 1 : // the next span. Prev it once to move to the last internal key that
2028 1 : // makes up the current span, and gatherBackward to land on the last
2029 1 : // internal key making up the previous span.
2030 1 : //
2031 1 : // In the diagram below, if the last span returned to the user during
2032 1 : // forward iteration was [b,c), i.blockIter is currently positioned at
2033 1 : // [d,e). The block iter must be positioned over [a,b) to gather the
2034 1 : // previous span's fragments.
2035 1 : //
2036 1 : // ... [a,b) [b,c) [b,c) [b,c) [d,e) ...
2037 1 : // ^ ^
2038 1 : // want i.blockIter
2039 1 : if x, err := i.gatherBackward(i.blockIter.Prev()); err != nil {
2040 0 : return nil, err
2041 1 : } else if invariants.Enabled && !x.Valid() {
2042 0 : panic("pebble: invariant violation: previous entry unexpectedly invalid")
2043 : }
2044 1 : i.dir = -1
2045 : }
2046 : // We know that this blockIter has in-place values.
2047 1 : return i.gatherBackward(&i.blockIter.ikey, base.MakeInPlaceValue(i.blockIter.val))
2048 : }
2049 :
2050 : // SeekGE implements (keyspan.FragmentIterator).SeekGE.
2051 1 : func (i *fragmentBlockIter) SeekGE(k []byte) (*keyspan.Span, error) {
2052 1 : if s, err := i.SeekLT(k); err != nil {
2053 0 : return nil, err
2054 1 : } else if s != nil && i.blockIter.cmp(k, s.End) < 0 {
2055 1 : return s, nil
2056 1 : }
2057 : // TODO(jackson): If the above i.SeekLT(k) discovers a span but the span
2058 : // doesn't meet the k < s.End comparison, then there's no need for the
2059 : // SeekLT to gatherBackward.
2060 1 : return i.Next()
2061 : }
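// The SeekLT-then-check approach above works because fragment spans within
// a block are non-overlapping and sorted. An illustrative example with
// spans [a,c) and [d,e): for SeekGE(b), SeekLT(b) lands on [a,c), and since
// b < c that span covers b and is returned; for SeekGE(c), SeekLT(c) also
// lands on [a,c), but c >= c fails the check, so Next() advances to [d,e),
// the first span at or after c.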
2062 :
2063 : // SeekLT implements (keyspan.FragmentIterator).SeekLT.
2064 1 : func (i *fragmentBlockIter) SeekLT(k []byte) (*keyspan.Span, error) {
2065 1 : i.dir = -1
2066 1 : return i.gatherBackward(i.blockIter.SeekLT(k, base.SeekLTFlagsNone))
2067 1 : }
2068 :
2069 : // String implements fmt.Stringer.
2070 0 : func (i *fragmentBlockIter) String() string {
2071 0 : return "fragment-block-iter"
2072 0 : }
2073 :
2074 : // SetCloseHook implements sstable.FragmentIterator.
2075 0 : func (i *fragmentBlockIter) SetCloseHook(fn func(i keyspan.FragmentIterator) error) {
2076 0 : i.closeHook = fn
2077 0 : }
2078 :
2079 : // WrapChildren implements FragmentIterator.
2080 0 : func (i *fragmentBlockIter) WrapChildren(wrap keyspan.WrapFn) {}