1 : // Copyright 2012 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package pebble
6 :
7 : import (
8 : "context"
9 : "encoding/binary"
10 : "fmt"
11 : "io"
12 : "math"
13 : "sort"
14 : "sync"
15 : "sync/atomic"
16 : "time"
17 : "unsafe"
18 :
19 : "github.com/cockroachdb/errors"
20 : "github.com/cockroachdb/pebble/internal/base"
21 : "github.com/cockroachdb/pebble/internal/batchskl"
22 : "github.com/cockroachdb/pebble/internal/humanize"
23 : "github.com/cockroachdb/pebble/internal/keyspan"
24 : "github.com/cockroachdb/pebble/internal/private"
25 : "github.com/cockroachdb/pebble/internal/rangedel"
26 : "github.com/cockroachdb/pebble/internal/rangekey"
27 : "github.com/cockroachdb/pebble/internal/rawalloc"
28 : )
29 :
30 : const (
31 : batchCountOffset = 8
32 : batchHeaderLen = 12
33 : batchInitialSize = 1 << 10 // 1 KB
34 : batchMaxRetainedSize = 1 << 20 // 1 MB
35 : invalidBatchCount = 1<<32 - 1
36 : maxVarintLen32 = 5
37 : )
38 :
39 : // ErrNotIndexed means that a read operation on a batch failed because the
40 : // batch is not indexed and thus doesn't support reads.
41 : var ErrNotIndexed = errors.New("pebble: batch not indexed")
42 :
43 : // ErrInvalidBatch indicates that a batch is invalid or otherwise corrupted.
44 : var ErrInvalidBatch = errors.New("pebble: invalid batch")
45 :
46 : // ErrBatchTooLarge indicates that a batch exceeds the maximum batch size and cannot be committed.
47 : var ErrBatchTooLarge = errors.Newf("pebble: batch too large: >= %s", humanize.Bytes.Uint64(maxBatchSize))
48 :
49 : // DeferredBatchOp represents a batch operation (e.g. set, merge, delete) that is
50 : // being inserted into the batch. Indexing is not performed on the specified key
51 : // until Finish is called, hence the name deferred. This struct lets the caller
52 : // copy or encode keys/values directly into the batch representation instead of
53 : // copying into an intermediary buffer and then having pebble.Batch copy from it.
54 : type DeferredBatchOp struct {
55 : index *batchskl.Skiplist
56 :
57 : // Key and Value point to parts of the binary batch representation where
58 : // keys and values should be encoded/copied into. len(Key) and len(Value)
59 : // bytes must be copied into these slices respectively before calling
60 : // Finish(). Changing where these slices point to is not allowed.
61 : Key, Value []byte
62 : offset uint32
63 : }
64 :
65 : // Finish completes the addition of this batch operation, and adds it to the
66 : // index if necessary. Must be called exactly once, after keys/values
67 : // have been filled into Key and Value. Not calling Finish or not
68 : // copying/encoding keys will result in an incomplete index, and calling Finish
69 : // twice may result in a panic.
70 1 : func (d DeferredBatchOp) Finish() error {
71 1 : if d.index != nil {
72 0 : if err := d.index.Add(d.offset); err != nil {
73 0 : return err
74 0 : }
75 : }
76 1 : return nil
77 : }
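
// For illustration, a minimal sketch (not part of the package) of the
// deferred-op pattern DeferredBatchOp supports, assuming an existing *Batch b
// and key/value slices k and v:
//
//	op := b.SetDeferred(len(k), len(v))
//	copy(op.Key, k)   // encode the key directly into the batch repr
//	copy(op.Value, v) // encode the value directly into the batch repr
//	if err := op.Finish(); err != nil {
//		return err // index insertion failed on an indexed batch
//	}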
78 :
79 : // A Batch is a sequence of Sets, Merges, Deletes, DeleteRanges, RangeKeySets,
80 : // RangeKeyUnsets, and/or RangeKeyDeletes that are applied atomically. Batch
81 : // implements the Reader interface, but only an indexed batch supports reading
82 : // (without error) via Get or NewIter. A non-indexed batch will return
83 : // ErrNotIndexed when read from. A batch is not safe for concurrent use, and
84 : // consumers should use a batch per goroutine or provide their own
85 : // synchronization.
86 : //
87 : // # Indexing
88 : //
89 : // Batches can be optionally indexed (see DB.NewIndexedBatch). An indexed batch
90 : // allows iteration via an Iterator (see Batch.NewIter). The iterator provides
91 : // a merged view of the operations in the batch and the underlying
92 : // database. This is implemented by treating the batch as an additional layer
93 : // in the LSM where every entry in the batch is considered newer than any entry
94 : // in the underlying database (batch entries have the InternalKeySeqNumBatch
95 : // bit set). By treating the batch as an additional layer in the LSM, iteration
96 : // supports all batch operations (i.e. Set, Merge, Delete, DeleteRange,
97 : // RangeKeySet, RangeKeyUnset, RangeKeyDelete) with minimal effort.
98 : //
99 : // The same key can be operated on multiple times in a batch, though only the
100 : // latest operation will be visible. For example, Put("a", "b"), Delete("a")
101 : // will cause the key "a" to not be visible in the batch. Put("a", "b"),
102 : // Put("a", "c") will cause a read of "a" to return the value "c".
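//
// A minimal sketch of the latter, assuming an open *DB db:
//
//	b := db.NewIndexedBatch()
//	_ = b.Set([]byte("a"), []byte("b"), nil)
//	_ = b.Set([]byte("a"), []byte("c"), nil)
//	v, closer, _ := b.Get([]byte("a")) // v is "c": the latest Set wins
//	_ = closer.Close()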
103 : //
104 : // The batch index is implemented via a skiplist (internal/batchskl). While
105 : // the skiplist implementation is very fast, inserting into an indexed batch is
106 : // significantly slower than inserting into a non-indexed batch. Only use an
107 : // indexed batch if you require reading from it.
108 : //
109 : // # Atomic commit
110 : //
111 : // The operations in a batch are persisted by calling Batch.Commit which is
112 : // equivalent to calling DB.Apply(batch). A batch is committed atomically by
113 : // writing the internal batch representation to the WAL, adding all of the
114 : // batch operations to the memtable associated with the WAL, and then
115 : // incrementing the visible sequence number so that subsequent reads can see
116 : // the effects of the batch operations. If WriteOptions.Sync is true, a call to
117 : // Batch.Commit will guarantee that the batch is persisted to disk before
118 : // returning. See commitPipeline for more on the implementation details.
119 : //
120 : // # Large batches
121 : //
122 : // The size of a batch is limited only by available memory (be aware that
123 : // indexed batches require considerable additional memory for the skiplist
124 : // structure). A given WAL file has a single memtable associated with it (this
125 : // restriction could be removed, but doing so is onerous and complex). And a
126 : // memtable has a fixed size due to the underlying fixed size arena. Note that
127 : // this differs from RocksDB where a memtable can grow arbitrarily large using
128 : // a list of arena chunks. In RocksDB this is accomplished by storing pointers
129 : // in the arena memory, but that isn't possible in Go.
130 : //
131 : // During Batch.Commit, a batch which is larger than a threshold (>
132 : // MemTableSize/2) is wrapped in a flushableBatch and inserted into the queue
133 : // of memtables. A flushableBatch forces the WAL to be rotated, but that
134 : // happens anyway when the memtable becomes full, so this does not cause significant
135 : // WAL churn. Because the flushableBatch is readable as another layer in the
136 : // LSM, Batch.Commit returns as soon as the flushableBatch has been added to
137 : // the queue of memtables.
138 : //
139 : // Internally, a flushableBatch provides Iterator support by sorting the batch
140 : // contents (the batch is sorted once, when it is added to the memtable
141 : // queue). Sorting the batch contents and insertion of the contents into a
142 : // memtable have the same big-O time, but the constant factor dominates
143 : // here. Sorting is significantly faster and uses significantly less memory.
144 : //
145 : // # Internal representation
146 : //
147 : // The internal batch representation is a contiguous byte buffer with a fixed
148 : // 12-byte header, followed by a series of records.
149 : //
150 : // +-------------+------------+--- ... ---+
151 : // | SeqNum (8B) | Count (4B) | Entries |
152 : // +-------------+------------+--- ... ---+
153 : //
154 : // Each record has a 1-byte kind tag prefix, followed by 1 or 2 length-prefixed
155 : // strings (varstring):
156 : //
157 : // +-----------+-----------------+-------------------+
158 : // | Kind (1B) | Key (varstring) | Value (varstring) |
159 : // +-----------+-----------------+-------------------+
160 : //
161 : // A varstring is a varint32 followed by N bytes of data. The Kind tags are
162 : // exactly those specified by InternalKeyKind. The following table shows the
163 : // format for records of each kind:
164 : //
165 : // InternalKeyKindDelete varstring
166 : // InternalKeyKindLogData varstring
167 : // InternalKeyKindIngestSST varstring
168 : // InternalKeyKindSet varstring varstring
169 : // InternalKeyKindMerge varstring varstring
170 : // InternalKeyKindRangeDelete varstring varstring
171 : // InternalKeyKindRangeKeySet varstring varstring
172 : // InternalKeyKindRangeKeyUnset varstring varstring
173 : // InternalKeyKindRangeKeyDelete varstring varstring
174 : //
175 : // The intuition here is that the arguments to Delete, Set,
176 : // Merge, DeleteRange and RangeKeyDelete are encoded into the batch. The
177 : // RangeKeySet and RangeKeyUnset operations are slightly more complicated,
178 : // encoding their end key, suffix and value [in the case of RangeKeySet] within
179 : // the Value varstring. For more information on the value encoding for
180 : // RangeKeySet and RangeKeyUnset, see the internal/rangekey package.
181 : //
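// As a hedged sketch, assuming repr holds the bytes returned by Batch.Repr()
// for a non-empty batch, the header and first record can be decoded with the
// standard library:
//
//	seqNum := binary.LittleEndian.Uint64(repr[0:8])
//	count := binary.LittleEndian.Uint32(repr[8:12])
//	kind := repr[12] // kind tag byte of the first record (an InternalKeyKind)
//	keyLen, n := binary.Uvarint(repr[13:])
//	key := repr[13+n : 13+n+int(keyLen)]
//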
182 : // The internal batch representation is the on disk format for a batch in the
183 : // WAL, and thus stable. New record kinds may be added, but the existing ones
184 : // will not be modified.
185 : type Batch struct {
186 : batchInternal
187 : applied atomic.Bool
188 : }
189 :
190 : // batchInternal contains the set of fields within Batch that are non-atomic and
191 : // capable of being reset using a *b = batchInternal{} struct copy.
192 : type batchInternal struct {
193 : // data is the wire format of a batch's log entry:
194 : // - 8 bytes for a sequence number of the first batch element,
195 : // or zeroes if the batch has not yet been applied,
196 : // - 4 bytes for the count: the number of elements in the batch,
197 : // or "\xff\xff\xff\xff" if the batch is invalid,
198 : // - count elements, being:
199 : // - one byte for the kind
200 : // - the varint-string user key,
201 : // - the varint-string value (if kind != delete).
202 : // The sequence number and count are stored in little-endian order.
203 : //
204 : // The data field can be (but is not guaranteed to be) nil for new
205 : // batches. Large batches will set the data field to nil when committed as
206 : // the data has been moved to a flushableBatch and inserted into the queue of
207 : // memtables.
208 : data []byte
209 : cmp Compare
210 : formatKey base.FormatKey
211 : abbreviatedKey AbbreviatedKey
212 :
213 : // An upper bound on required space to add this batch to a memtable.
214 : // Note that although batches are limited to 4 GiB in size, that limit
215 : // applies to len(data), not the memtable size. The upper bound on the
216 : // size of a memtable node is larger than the overhead of the batch's log
217 : // encoding, so memTableSize is larger than len(data) and may overflow a
218 : // uint32.
219 : memTableSize uint64
220 :
221 : // The db to which the batch will be committed. Do not change this field
222 : // after the batch has been created as it might invalidate internal state.
223 : // Batch.memTableSize is only refreshed if Batch.db is set. Setting db to
224 : // nil once it has been set implies that the Batch has encountered an error.
225 : db *DB
226 :
227 : // The count of records in the batch. This count will be stored in the batch
228 : // data whenever Repr() is called.
229 : count uint64
230 :
231 : // The count of range deletions in the batch. Updated every time a range
232 : // deletion is added.
233 : countRangeDels uint64
234 :
235 : // The count of range key sets, unsets and deletes in the batch. Updated
236 : // every time a RANGEKEYSET, RANGEKEYUNSET or RANGEKEYDEL key is added.
237 : countRangeKeys uint64
238 :
239 : // A deferredOp struct, stored in the Batch so that a pointer can be returned
240 : // from the *Deferred() methods rather than a value.
241 : deferredOp DeferredBatchOp
242 :
243 : // An optional skiplist keyed by offset into data of the entry.
244 : index *batchskl.Skiplist
245 : rangeDelIndex *batchskl.Skiplist
246 : rangeKeyIndex *batchskl.Skiplist
247 :
248 : // Fragmented range deletion tombstones. Cached the first time a range
249 : // deletion iterator is requested. The cache is invalidated whenever a new
250 : // range deletion is added to the batch. This cache can only be used when
251 : // opening an iterator to read at a batch sequence number >=
252 : // tombstonesSeqNum. This is the case for all new iterators created over a
253 : // batch but it's not the case for all cloned iterators.
254 : tombstones []keyspan.Span
255 : tombstonesSeqNum uint64
256 :
257 : // Fragmented range key spans. Cached the first time a range key iterator is
258 : // requested. The cache is invalidated whenever a new range key
259 : // (RangeKey{Set,Unset,Del}) is added to the batch. This cache can only be
260 : // used when opening an iterator to read at a batch sequence number >=
261 : // rangeKeysSeqNum. This is the case for all new iterators created over a
262 : // batch but it's not the case for all cloned iterators.
263 : rangeKeys []keyspan.Span
264 : rangeKeysSeqNum uint64
265 :
266 : // The flushableBatch wrapper if the batch is too large to fit in the
267 : // memtable.
268 : flushable *flushableBatch
269 :
270 : // minimumFormatMajorVersion indicates the format major version required in
271 : // order to commit this batch. If an operation requires a particular format
272 : // major version, it ratchets the batch's minimumFormatMajorVersion. When
273 : // the batch is committed, this is validated against the database's current
274 : // format major version.
275 : minimumFormatMajorVersion FormatMajorVersion
276 :
277 : // Synchronous Apply uses the commit WaitGroup for both publishing the
278 : // seqnum and waiting for the WAL fsync (if needed). Asynchronous
279 : // ApplyNoSyncWait, which implies WriteOptions.Sync is true, uses the commit
280 : // WaitGroup for publishing the seqnum and the fsyncWait WaitGroup for
281 : // waiting for the WAL fsync.
282 : //
283 : // TODO(sumeer): if we find that ApplyNoSyncWait in conjunction with
284 : // SyncWait is causing higher memory usage because of the time duration
285 : // between when the sync is already done, and a goroutine calls SyncWait
286 : // (followed by Batch.Close), we could separate out {fsyncWait, commitErr}
287 : // into a separate struct that is allocated separately (using another
288 : // sync.Pool), and only that struct needs to outlive Batch.Close (which
289 : // could then be called immediately after ApplyNoSyncWait). commitStats
290 : // will also need to be in this separate struct.
291 : commit sync.WaitGroup
292 : fsyncWait sync.WaitGroup
293 :
294 : commitStats BatchCommitStats
295 :
296 : commitErr error
297 :
298 : // Position bools together to reduce the size of the struct.
299 :
300 : // ingestedSSTBatch indicates that the batch contains one or more key kinds
301 : // of InternalKeyKindIngestSST. If the batch contains key kinds of IngestSST
302 : // then it will only contain key kinds of IngestSST.
303 : ingestedSSTBatch bool
304 :
305 : // committing is set to true when a batch begins to commit. It's used to
306 : // ensure the batch is not mutated concurrently. It is deliberately not an
307 : // atomic, so as to avoid the overhead on batch mutations. This is
308 : // okay, because under correct usage this field will never be accessed
309 : // concurrently. Only under incorrect usage may the memory accesses of
310 : // this variable violate memory safety. Since we don't use atomics here,
311 : // false negatives are possible.
312 : committing bool
313 : }
314 :
315 : // BatchCommitStats exposes stats related to committing a batch.
316 : //
317 : // NB: there is no Pebble internal tracing (using LoggerAndTracer) of slow
318 : // batch commits. The caller can use these stats to do their own tracing as
319 : // needed.
320 : type BatchCommitStats struct {
321 : // TotalDuration is the time spent in DB.{Apply,ApplyNoSyncWait} or
322 : // Batch.Commit, plus the time waiting in Batch.SyncWait. If there is a gap
323 : // between calling ApplyNoSyncWait and calling SyncWait, that gap could
324 : // include some duration in which real work was being done for the commit
325 : // and will not be included here. This missing time is considered acceptable
326 : // since the goal of these stats is to understand user-facing latency.
327 : //
328 : // TotalDuration includes time spent in various queues both inside Pebble
329 : // and outside Pebble (I/O queues, goroutine scheduler queue, mutex wait
330 : // etc.). For some of these queues (which we consider important) the wait
331 : // times are included below -- these expose low-level implementation detail
332 : // and are meant for expert diagnosis and subject to change. There may be
333 : // unaccounted time after subtracting those values from TotalDuration.
334 : TotalDuration time.Duration
335 : // SemaphoreWaitDuration is the wait time for semaphores in
336 : // commitPipeline.Commit.
337 : SemaphoreWaitDuration time.Duration
338 : // WALQueueWaitDuration is the wait time for allocating memory blocks in the
339 : // LogWriter (due to the LogWriter not writing fast enough). At the moment
340 : // this duration is always zero because a single WAL will allow
341 : // allocating memory blocks up to the entire memtable size. In the future,
342 : // we may pipeline WALs and bound the WAL queued blocks separately, so this
343 : // field is preserved for that possibility.
344 : WALQueueWaitDuration time.Duration
345 : // MemTableWriteStallDuration is the wait caused by a write stall due to too
346 : // many memtables (due to not flushing fast enough).
347 : MemTableWriteStallDuration time.Duration
348 : // L0ReadAmpWriteStallDuration is the wait caused by a write stall due to
349 : // high read amplification in L0 (due to not compacting fast enough out of
350 : // L0).
351 : L0ReadAmpWriteStallDuration time.Duration
352 : // WALRotationDuration is the wait time for WAL rotation, which includes
353 : // syncing and closing the old WAL and creating (or reusing) a new one.
354 : WALRotationDuration time.Duration
355 : // CommitWaitDuration is the wait for publishing the seqnum plus the
356 : // duration for the WAL sync (if requested). The former should be tiny and
357 : // one can assume that this is all due to the WAL sync.
358 : CommitWaitDuration time.Duration
359 : }
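
// As a hedged sketch: after a commit, the stats can be read back (assuming
// the Batch.CommitStats accessor) and used for caller-side tracing of slow
// commits:
//
//	b := db.NewBatch()
//	_ = b.Set([]byte("k"), []byte("v"), nil)
//	if err := b.Commit(Sync); err != nil {
//		return err
//	}
//	stats := b.CommitStats()
//	if stats.TotalDuration > 100*time.Millisecond {
//		log.Printf("slow commit: %+v", stats)
//	}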
360 :
361 : var _ Reader = (*Batch)(nil)
362 : var _ Writer = (*Batch)(nil)
363 :
364 : var batchPool = sync.Pool{
365 1 : New: func() interface{} {
366 1 : return &Batch{}
367 1 : },
368 : }
369 :
370 : type indexedBatch struct {
371 : batch Batch
372 : index batchskl.Skiplist
373 : }
374 :
375 : var indexedBatchPool = sync.Pool{
376 1 : New: func() interface{} {
377 1 : return &indexedBatch{}
378 1 : },
379 : }
380 :
381 1 : func newBatch(db *DB) *Batch {
382 1 : b := batchPool.Get().(*Batch)
383 1 : b.db = db
384 1 : return b
385 1 : }
386 :
387 1 : func newBatchWithSize(db *DB, size int) *Batch {
388 1 : b := newBatch(db)
389 1 : if cap(b.data) < size {
390 1 : b.data = rawalloc.New(0, size)
391 1 : }
392 1 : return b
393 : }
394 :
395 1 : func newIndexedBatch(db *DB, comparer *Comparer) *Batch {
396 1 : i := indexedBatchPool.Get().(*indexedBatch)
397 1 : i.batch.cmp = comparer.Compare
398 1 : i.batch.formatKey = comparer.FormatKey
399 1 : i.batch.abbreviatedKey = comparer.AbbreviatedKey
400 1 : i.batch.db = db
401 1 : i.batch.index = &i.index
402 1 : i.batch.index.Init(&i.batch.data, i.batch.cmp, i.batch.abbreviatedKey)
403 1 : return &i.batch
404 1 : }
405 :
406 0 : func newIndexedBatchWithSize(db *DB, comparer *Comparer, size int) *Batch {
407 0 : b := newIndexedBatch(db, comparer)
408 0 : if cap(b.data) < size {
409 0 : b.data = rawalloc.New(0, size)
410 0 : }
411 0 : return b
412 : }
413 :
414 : // nextSeqNum returns the batch "sequence number" that will be given to the next
415 : // key written to the batch. During iteration keys within an indexed batch are
416 : // given a sequence number consisting of their offset within the batch combined
417 : // with the base.InternalKeySeqNumBatch bit. These sequence numbers are only
418 : // used during iteration, and the keys are assigned ordinary sequence numbers
419 : // when the batch is committed.
420 1 : func (b *Batch) nextSeqNum() uint64 {
421 1 : return uint64(len(b.data)) | base.InternalKeySeqNumBatch
422 1 : }
423 :
424 1 : func (b *Batch) release() {
425 1 : if b.db == nil {
426 1 : // The batch was not created using newBatch or newIndexedBatch, or an error
427 1 : // was encountered. We don't try to reuse batches that encountered an error
428 1 : // because they might be stuck somewhere in the system and attempting to
429 1 : // reuse such batches is a recipe for onerous debugging sessions. Instead,
430 1 : // let the GC do its job.
431 1 : return
432 1 : }
433 1 : b.db = nil
434 1 :
435 1 : // NB: This is ugly (it would be cleaner if we could just assign a Batch{}),
436 1 : // but necessary so that we can use atomic.StoreUint32 for the Batch.applied
437 1 : // field. Without using an atomic to clear that field the Go race detector
438 1 : // complains.
439 1 : b.Reset()
440 1 : b.cmp = nil
441 1 : b.formatKey = nil
442 1 : b.abbreviatedKey = nil
443 1 :
444 1 : if b.index == nil {
445 1 : batchPool.Put(b)
446 1 : } else {
447 1 : b.index, b.rangeDelIndex, b.rangeKeyIndex = nil, nil, nil
448 1 : indexedBatchPool.Put((*indexedBatch)(unsafe.Pointer(b)))
449 1 : }
450 : }
451 :
452 1 : func (b *Batch) refreshMemTableSize() {
453 1 : b.memTableSize = 0
454 1 : if len(b.data) < batchHeaderLen {
455 1 : return
456 1 : }
457 :
458 1 : b.countRangeDels = 0
459 1 : b.countRangeKeys = 0
460 1 : b.minimumFormatMajorVersion = 0
461 1 : for r := b.Reader(); ; {
462 1 : kind, key, value, ok := r.Next()
463 1 : if !ok {
464 1 : break
465 : }
466 1 : switch kind {
467 1 : case InternalKeyKindRangeDelete:
468 1 : b.countRangeDels++
469 1 : case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
470 1 : b.countRangeKeys++
471 0 : case InternalKeyKindDeleteSized:
472 0 : if b.minimumFormatMajorVersion < FormatDeleteSizedAndObsolete {
473 0 : b.minimumFormatMajorVersion = FormatDeleteSizedAndObsolete
474 0 : }
475 1 : case InternalKeyKindIngestSST:
476 1 : if b.minimumFormatMajorVersion < FormatFlushableIngest {
477 1 : b.minimumFormatMajorVersion = FormatFlushableIngest
478 1 : }
479 : // This key kind doesn't contribute to the memtable size.
480 1 : continue
481 : }
482 1 : b.memTableSize += memTableEntrySize(len(key), len(value))
483 : }
484 1 : if b.countRangeKeys > 0 && b.minimumFormatMajorVersion < FormatRangeKeys {
485 1 : b.minimumFormatMajorVersion = FormatRangeKeys
486 1 : }
487 : }
488 :
489 : // Apply the operations contained in the batch to the receiver batch.
490 : //
491 : // It is safe to modify the contents of the arguments after Apply returns.
492 1 : func (b *Batch) Apply(batch *Batch, _ *WriteOptions) error {
493 1 : if b.ingestedSSTBatch {
494 0 : panic("pebble: invalid batch application")
495 : }
496 1 : if len(batch.data) == 0 {
497 0 : return nil
498 0 : }
499 1 : if len(batch.data) < batchHeaderLen {
500 0 : return base.CorruptionErrorf("pebble: invalid batch")
501 0 : }
502 :
503 1 : offset := len(b.data)
504 1 : if offset == 0 {
505 1 : b.init(offset)
506 1 : offset = batchHeaderLen
507 1 : }
508 1 : b.data = append(b.data, batch.data[batchHeaderLen:]...)
509 1 :
510 1 : b.setCount(b.Count() + batch.Count())
511 1 :
512 1 : if b.db != nil || b.index != nil {
513 1 : // Only iterate over the new entries if we need to track memTableSize or in
514 1 : // order to update the index.
515 1 : for iter := BatchReader(b.data[offset:]); len(iter) > 0; {
516 1 : offset := uintptr(unsafe.Pointer(&iter[0])) - uintptr(unsafe.Pointer(&b.data[0]))
517 1 : kind, key, value, ok := iter.Next()
518 1 : if !ok {
519 0 : break
520 : }
521 1 : switch kind {
522 1 : case InternalKeyKindRangeDelete:
523 1 : b.countRangeDels++
524 1 : case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
525 1 : b.countRangeKeys++
526 0 : case InternalKeyKindIngestSST:
527 0 : panic("pebble: invalid key kind for batch")
528 : }
529 1 : if b.index != nil {
530 1 : var err error
531 1 : switch kind {
532 1 : case InternalKeyKindRangeDelete:
533 1 : b.tombstones = nil
534 1 : b.tombstonesSeqNum = 0
535 1 : if b.rangeDelIndex == nil {
536 1 : b.rangeDelIndex = batchskl.NewSkiplist(&b.data, b.cmp, b.abbreviatedKey)
537 1 : }
538 1 : err = b.rangeDelIndex.Add(uint32(offset))
539 1 : case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
540 1 : b.rangeKeys = nil
541 1 : b.rangeKeysSeqNum = 0
542 1 : if b.rangeKeyIndex == nil {
543 1 : b.rangeKeyIndex = batchskl.NewSkiplist(&b.data, b.cmp, b.abbreviatedKey)
544 1 : }
545 1 : err = b.rangeKeyIndex.Add(uint32(offset))
546 1 : default:
547 1 : err = b.index.Add(uint32(offset))
548 : }
549 1 : if err != nil {
550 0 : return err
551 0 : }
552 : }
553 1 : b.memTableSize += memTableEntrySize(len(key), len(value))
554 : }
555 : }
556 1 : return nil
557 : }
558 :
559 : // Get gets the value for the given key. It returns ErrNotFound if the Batch
560 : // does not contain the key.
561 : //
562 : // The caller should not modify the contents of the returned slice, but it is
563 : // safe to modify the contents of the argument after Get returns. The returned
564 : // slice will remain valid until the returned Closer is closed. On success, the
565 : // caller MUST call closer.Close() or a memory leak will occur.
566 1 : func (b *Batch) Get(key []byte) ([]byte, io.Closer, error) {
567 1 : if b.index == nil {
568 0 : return nil, nil, ErrNotIndexed
569 0 : }
570 1 : return b.db.getInternal(key, b, nil /* snapshot */)
571 : }
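
// A usage sketch for Get (illustrative): the returned closer must be closed
// once the value is no longer needed, or memory is leaked:
//
//	v, closer, err := b.Get(key)
//	if err != nil {
//		return err // ErrNotFound if the key is absent
//	}
//	defer closer.Close() // v is valid only until Close is called
//	use(v)               // use is a hypothetical consumer of the value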
572 :
573 1 : func (b *Batch) prepareDeferredKeyValueRecord(keyLen, valueLen int, kind InternalKeyKind) {
574 1 : if b.committing {
575 0 : panic("pebble: batch already committing")
576 : }
577 1 : if len(b.data) == 0 {
578 1 : b.init(keyLen + valueLen + 2*binary.MaxVarintLen64 + batchHeaderLen)
579 1 : }
580 1 : b.count++
581 1 : b.memTableSize += memTableEntrySize(keyLen, valueLen)
582 1 :
583 1 : pos := len(b.data)
584 1 : b.deferredOp.offset = uint32(pos)
585 1 : b.grow(1 + 2*maxVarintLen32 + keyLen + valueLen)
586 1 : b.data[pos] = byte(kind)
587 1 : pos++
588 1 :
589 1 : {
590 1 : // TODO(peter): Manually inlined version binary.PutUvarint(). This is 20%
591 1 : // faster on BenchmarkBatchSet on go1.13. Remove if go1.14 or future
592 1 : // versions show this to not be a performance win.
593 1 : x := uint32(keyLen)
594 1 : for x >= 0x80 {
595 1 : b.data[pos] = byte(x) | 0x80
596 1 : x >>= 7
597 1 : pos++
598 1 : }
599 1 : b.data[pos] = byte(x)
600 1 : pos++
601 : }
602 :
603 1 : b.deferredOp.Key = b.data[pos : pos+keyLen]
604 1 : pos += keyLen
605 1 :
606 1 : {
607 1 : // TODO(peter): Manually inlined version binary.PutUvarint(). This is 20%
608 1 : // faster on BenchmarkBatchSet on go1.13. Remove if go1.14 or future
609 1 : // versions show this to not be a performance win.
610 1 : x := uint32(valueLen)
611 1 : for x >= 0x80 {
612 1 : b.data[pos] = byte(x) | 0x80
613 1 : x >>= 7
614 1 : pos++
615 1 : }
616 1 : b.data[pos] = byte(x)
617 1 : pos++
618 : }
619 :
620 1 : b.deferredOp.Value = b.data[pos : pos+valueLen]
621 1 : // Shrink data since varints may be shorter than the upper bound.
622 1 : b.data = b.data[:pos+valueLen]
623 : }
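
// For reference, each manually inlined loop above writes the same bytes that
// the standard library would; a sketch of the equivalence:
//
//	var tmp [binary.MaxVarintLen32]byte
//	n := binary.PutUvarint(tmp[:], uint64(keyLen))
//	// tmp[:n] matches the varint emitted by the inlined loop for keyLen.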
624 :
625 1 : func (b *Batch) prepareDeferredKeyRecord(keyLen int, kind InternalKeyKind) {
626 1 : if b.committing {
627 0 : panic("pebble: batch already committing")
628 : }
629 1 : if len(b.data) == 0 {
630 1 : b.init(keyLen + binary.MaxVarintLen64 + batchHeaderLen)
631 1 : }
632 1 : b.count++
633 1 : b.memTableSize += memTableEntrySize(keyLen, 0)
634 1 :
635 1 : pos := len(b.data)
636 1 : b.deferredOp.offset = uint32(pos)
637 1 : b.grow(1 + maxVarintLen32 + keyLen)
638 1 : b.data[pos] = byte(kind)
639 1 : pos++
640 1 :
641 1 : {
642 1 : // TODO(peter): Manually inlined version binary.PutUvarint(). Remove if
643 1 : // go1.13 or future versions show this to not be a performance win. See
644 1 : // BenchmarkBatchSet.
645 1 : x := uint32(keyLen)
646 1 : for x >= 0x80 {
647 0 : b.data[pos] = byte(x) | 0x80
648 0 : x >>= 7
649 0 : pos++
650 0 : }
651 1 : b.data[pos] = byte(x)
652 1 : pos++
653 : }
654 :
655 1 : b.deferredOp.Key = b.data[pos : pos+keyLen]
656 1 : b.deferredOp.Value = nil
657 1 :
658 1 : // Shrink data since varint may be shorter than the upper bound.
659 1 : b.data = b.data[:pos+keyLen]
660 : }
661 :
662 : // AddInternalKey allows the caller to add an internal key of point key or range
663 : // key kinds (but not RangeDelete) to a batch. Passing in an internal key of
664 : // kind RangeDelete will result in a panic. Note that the seqnum in the internal
665 : // key is effectively ignored, even though the Kind is preserved. This is
666 : // because the batch format does not allow for a per-key seqnum to be specified,
667 : // only a batch-wide one.
668 : //
669 : // Note that non-indexed keys (InternalKeyKind{LogData,IngestSST}) are not
670 : // supported with this method as they require specialized logic.
671 1 : func (b *Batch) AddInternalKey(key *base.InternalKey, value []byte, _ *WriteOptions) error {
672 1 : keyLen := len(key.UserKey)
673 1 : hasValue := false
674 1 : switch kind := key.Kind(); kind {
675 0 : case InternalKeyKindRangeDelete:
676 0 : panic("unexpected range delete in AddInternalKey")
677 1 : case InternalKeyKindSingleDelete, InternalKeyKindDelete:
678 1 : b.prepareDeferredKeyRecord(keyLen, kind)
679 1 : b.deferredOp.index = b.index
680 1 : case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
681 1 : b.prepareDeferredKeyValueRecord(keyLen, len(value), kind)
682 1 : hasValue = true
683 1 : b.incrementRangeKeysCount()
684 1 : default:
685 1 : b.prepareDeferredKeyValueRecord(keyLen, len(value), kind)
686 1 : hasValue = true
687 1 : b.deferredOp.index = b.index
688 : }
689 1 : copy(b.deferredOp.Key, key.UserKey)
690 1 : if hasValue {
691 1 : copy(b.deferredOp.Value, value)
692 1 : }
693 :
694 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
695 : // in go1.13 will remove the need for this.
696 1 : if b.index != nil {
697 0 : if err := b.index.Add(b.deferredOp.offset); err != nil {
698 0 : return err
699 0 : }
700 : }
701 1 : return nil
702 : }
703 :
704 : // Set adds an action to the batch that sets the key to map to the value.
705 : //
706 : // It is safe to modify the contents of the arguments after Set returns.
707 1 : func (b *Batch) Set(key, value []byte, _ *WriteOptions) error {
708 1 : deferredOp := b.SetDeferred(len(key), len(value))
709 1 : copy(deferredOp.Key, key)
710 1 : copy(deferredOp.Value, value)
711 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
712 1 : // in go1.13 will remove the need for this.
713 1 : if b.index != nil {
714 1 : if err := b.index.Add(deferredOp.offset); err != nil {
715 0 : return err
716 0 : }
717 : }
718 1 : return nil
719 : }
720 :
721 : // SetDeferred is similar to Set in that it adds a set operation to the batch,
722 : // except it only takes in key/value lengths instead of complete slices,
723 : // letting the caller encode into those objects and then call Finish() on the
724 : // returned object.
725 1 : func (b *Batch) SetDeferred(keyLen, valueLen int) *DeferredBatchOp {
726 1 : b.prepareDeferredKeyValueRecord(keyLen, valueLen, InternalKeyKindSet)
727 1 : b.deferredOp.index = b.index
728 1 : return &b.deferredOp
729 1 : }
730 :
731 : // Merge adds an action to the batch that merges the value at key with the new
732 : // value. The details of the merge are dependent upon the configured merge
733 : // operator.
734 : //
735 : // It is safe to modify the contents of the arguments after Merge returns.
736 1 : func (b *Batch) Merge(key, value []byte, _ *WriteOptions) error {
737 1 : deferredOp := b.MergeDeferred(len(key), len(value))
738 1 : copy(deferredOp.Key, key)
739 1 : copy(deferredOp.Value, value)
740 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
741 1 : // in go1.13 will remove the need for this.
742 1 : if b.index != nil {
743 1 : if err := b.index.Add(deferredOp.offset); err != nil {
744 0 : return err
745 0 : }
746 : }
747 1 : return nil
748 : }
749 :
750 : // MergeDeferred is similar to Merge in that it adds a merge operation to the
751 : // batch, except it only takes in key/value lengths instead of complete slices,
752 : // letting the caller encode into those objects and then call Finish() on the
753 : // returned object.
754 1 : func (b *Batch) MergeDeferred(keyLen, valueLen int) *DeferredBatchOp {
755 1 : b.prepareDeferredKeyValueRecord(keyLen, valueLen, InternalKeyKindMerge)
756 1 : b.deferredOp.index = b.index
757 1 : return &b.deferredOp
758 1 : }
759 :
760 : // Delete adds an action to the batch that deletes the entry for key.
761 : //
762 : // It is safe to modify the contents of the arguments after Delete returns.
763 1 : func (b *Batch) Delete(key []byte, _ *WriteOptions) error {
764 1 : deferredOp := b.DeleteDeferred(len(key))
765 1 : copy(deferredOp.Key, key)
766 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
767 1 : // in go1.13 will remove the need for this.
768 1 : if b.index != nil {
769 1 : if err := b.index.Add(deferredOp.offset); err != nil {
770 0 : return err
771 0 : }
772 : }
773 1 : return nil
774 : }
775 :
776 : // DeleteDeferred is similar to Delete in that it adds a delete operation to
777 : // the batch, except it only takes in key/value lengths instead of complete
778 : // slices, letting the caller encode into those objects and then call Finish()
779 : // on the returned object.
780 1 : func (b *Batch) DeleteDeferred(keyLen int) *DeferredBatchOp {
781 1 : b.prepareDeferredKeyRecord(keyLen, InternalKeyKindDelete)
782 1 : b.deferredOp.index = b.index
783 1 : return &b.deferredOp
784 1 : }
785 :
786 : // DeleteSized behaves identically to Delete, but takes an additional
787 : // argument indicating the size of the value being deleted. DeleteSized
788 : // should be preferred when the caller has the expectation that there exists
789 : // a single internal KV pair for the key (eg, the key has not been
790 : // overwritten recently), and the caller knows the size of its value.
791 : //
792 : // DeleteSized will record the value size within the tombstone and use it to
793 : // inform compaction-picking heuristics which strive to reduce space
794 : // amplification in the LSM. This "calling your shot" mechanic allows the
795 : // storage engine to more accurately estimate and reduce space amplification.
796 : //
797 : // It is safe to modify the contents of the arguments after DeleteSized
798 : // returns.
799 1 : func (b *Batch) DeleteSized(key []byte, deletedValueSize uint32, _ *WriteOptions) error {
800 1 : deferredOp := b.DeleteSizedDeferred(len(key), deletedValueSize)
801 1 : copy(b.deferredOp.Key, key)
802 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Check if in a
803 1 : // later Go release this is unnecessary.
804 1 : if b.index != nil {
805 0 : if err := b.index.Add(deferredOp.offset); err != nil {
806 0 : return err
807 0 : }
808 : }
809 1 : return nil
810 : }
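
// An illustrative sketch of "calling your shot": knownLen is an assumed
// caller-tracked size of the value previously written for key:
//
//	if err := b.DeleteSized(key, uint32(knownLen), nil); err != nil {
//		return err
//	}
//	// The recorded size informs compaction heuristics targeting the
//	// space occupied by the deleted value.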
811 :
812 : // DeleteSizedDeferred is similar to DeleteSized in that it adds a sized delete
813 : // operation to the batch, except it only takes in key length instead of a
814 : // complete key slice, letting the caller encode into the DeferredBatchOp.Key
815 : // slice and then call Finish() on the returned object.
816 1 : func (b *Batch) DeleteSizedDeferred(keyLen int, deletedValueSize uint32) *DeferredBatchOp {
817 1 : if b.minimumFormatMajorVersion < FormatDeleteSizedAndObsolete {
818 1 : b.minimumFormatMajorVersion = FormatDeleteSizedAndObsolete
819 1 : }
820 :
821 : // Encode the sum of the key length and the value in the value.
822 1 : v := uint64(deletedValueSize) + uint64(keyLen)
823 1 :
824 1 : // Encode `v` as a varint.
825 1 : var buf [binary.MaxVarintLen64]byte
826 1 : n := 0
827 1 : {
828 1 : x := v
829 1 : for x >= 0x80 {
830 1 : buf[n] = byte(x) | 0x80
831 1 : x >>= 7
832 1 : n++
833 1 : }
834 1 : buf[n] = byte(x)
835 1 : n++
836 : }
837 :
838 : // NB: In batch entries and sstable entries, values are stored as
839 : // varstrings. Here, the value is itself a simple varint. This results in an
840 : // unnecessary double layer of encoding:
841 : // varint(n) varint(deletedValueSize)
842 : // The first varint will always be 1 byte, since a varint-encoded uint64
843 : // never exceeds 10 bytes. This unnecessary extra byte and wrapping is
844 : // preserved to avoid special casing across the database, and in particular
845 : // in sstable block decoding which is performance sensitive.
846 1 : b.prepareDeferredKeyValueRecord(keyLen, n, InternalKeyKindDeleteSized)
847 1 : b.deferredOp.index = b.index
848 1 : copy(b.deferredOp.Value, buf[:n])
849 1 : return &b.deferredOp
850 : }
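
// A hedged sketch of undoing the double layer of encoding described above,
// given the decoded value bytes of a DELSIZED record and its key length:
//
//	v, n := binary.Uvarint(value) // inner varint: deletedValueSize + keyLen
//	if n <= 0 {
//		return ErrInvalidBatch
//	}
//	deletedValueSize := uint32(v - uint64(keyLen))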
851 :
852 : // SingleDelete adds an action to the batch that single deletes the entry for key.
853 : // See Writer.SingleDelete for more details on the semantics of SingleDelete.
854 : //
855 : // It is safe to modify the contents of the arguments after SingleDelete returns.
856 1 : func (b *Batch) SingleDelete(key []byte, _ *WriteOptions) error {
857 1 : deferredOp := b.SingleDeleteDeferred(len(key))
858 1 : copy(deferredOp.Key, key)
859 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
860 1 : // in go1.13 will remove the need for this.
861 1 : if b.index != nil {
862 1 : if err := b.index.Add(deferredOp.offset); err != nil {
863 0 : return err
864 0 : }
865 : }
866 1 : return nil
867 : }
868 :
869 : // SingleDeleteDeferred is similar to SingleDelete in that it adds a single delete
870 : // operation to the batch, except it only takes in key/value lengths instead of
871 : // complete slices, letting the caller encode into those objects and then call
872 : // Finish() on the returned object.
873 1 : func (b *Batch) SingleDeleteDeferred(keyLen int) *DeferredBatchOp {
874 1 : b.prepareDeferredKeyRecord(keyLen, InternalKeyKindSingleDelete)
875 1 : b.deferredOp.index = b.index
876 1 : return &b.deferredOp
877 1 : }
878 :
879 : // DeleteRange deletes all of the point keys (and values) in the range
880 : // [start,end) (inclusive on start, exclusive on end). DeleteRange does NOT
881 : // delete overlapping range keys (eg, keys set via RangeKeySet).
882 : //
883 : // It is safe to modify the contents of the arguments after DeleteRange
884 : // returns.
885 1 : func (b *Batch) DeleteRange(start, end []byte, _ *WriteOptions) error {
886 1 : deferredOp := b.DeleteRangeDeferred(len(start), len(end))
887 1 : copy(deferredOp.Key, start)
888 1 : copy(deferredOp.Value, end)
889 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
890 1 : // in go1.13 will remove the need for this.
891 1 : if deferredOp.index != nil {
892 1 : if err := deferredOp.index.Add(deferredOp.offset); err != nil {
893 0 : return err
894 0 : }
895 : }
896 1 : return nil
897 : }
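
// A usage sketch (illustrative): remove every point key in ["a", "c") while
// leaving overlapping range keys intact:
//
//	if err := b.DeleteRange([]byte("a"), []byte("c"), nil); err != nil {
//		return err
//	}
//	// Point keys such as "a" and "bb" become invisible; a range key set
//	// via RangeKeySet over the same span is unaffected.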
898 :
899 : // DeleteRangeDeferred is similar to DeleteRange in that it adds a delete range
900 : // operation to the batch, except it only takes in key lengths instead of
901 : // complete slices, letting the caller encode into those objects and then call
902 : // Finish() on the returned object. Note that DeferredBatchOp.Key should be
903 : // populated with the start key, and DeferredBatchOp.Value should be populated
904 : // with the end key.
905 1 : func (b *Batch) DeleteRangeDeferred(startLen, endLen int) *DeferredBatchOp {
906 1 : b.prepareDeferredKeyValueRecord(startLen, endLen, InternalKeyKindRangeDelete)
907 1 : b.countRangeDels++
908 1 : if b.index != nil {
909 1 : b.tombstones = nil
910 1 : b.tombstonesSeqNum = 0
911 1 : // Range deletions are rare, so we lazily allocate the index for them.
912 1 : if b.rangeDelIndex == nil {
913 1 : b.rangeDelIndex = batchskl.NewSkiplist(&b.data, b.cmp, b.abbreviatedKey)
914 1 : }
915 1 : b.deferredOp.index = b.rangeDelIndex
916 : }
917 1 : return &b.deferredOp
918 : }
919 :
920 : // RangeKeySet sets a range key mapping the key range [start, end) at the MVCC
921 : // timestamp suffix to value. The suffix is optional. If any portion of the key
922 : // range [start, end) is already set by a range key with the same suffix value,
923 : // RangeKeySet overrides it.
924 : //
925 : // It is safe to modify the contents of the arguments after RangeKeySet returns.
926 1 : func (b *Batch) RangeKeySet(start, end, suffix, value []byte, _ *WriteOptions) error {
927 1 : suffixValues := [1]rangekey.SuffixValue{{Suffix: suffix, Value: value}}
928 1 : internalValueLen := rangekey.EncodedSetValueLen(end, suffixValues[:])
929 1 :
930 1 : deferredOp := b.rangeKeySetDeferred(len(start), internalValueLen)
931 1 : copy(deferredOp.Key, start)
932 1 : n := rangekey.EncodeSetValue(deferredOp.Value, end, suffixValues[:])
933 1 : if n != internalValueLen {
934 0 : panic("unexpected internal value length mismatch")
935 : }
936 :
937 : // Manually inline DeferredBatchOp.Finish().
938 1 : if deferredOp.index != nil {
939 1 : if err := deferredOp.index.Add(deferredOp.offset); err != nil {
940 0 : return err
941 0 : }
942 : }
943 1 : return nil
944 : }
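
// A hedged sketch: set a range key over ["a", "z") at a comparer-specific
// MVCC timestamp suffix ts (an assumed, pre-encoded suffix):
//
//	err := b.RangeKeySet([]byte("a"), []byte("z"), ts, []byte("v"), nil)
//	if err != nil {
//		return err
//	}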
945 :
946 1 : func (b *Batch) rangeKeySetDeferred(startLen, internalValueLen int) *DeferredBatchOp {
947 1 : b.prepareDeferredKeyValueRecord(startLen, internalValueLen, InternalKeyKindRangeKeySet)
948 1 : b.incrementRangeKeysCount()
949 1 : return &b.deferredOp
950 1 : }
951 :
952 1 : func (b *Batch) incrementRangeKeysCount() {
953 1 : b.countRangeKeys++
954 1 : if b.minimumFormatMajorVersion < FormatRangeKeys {
955 1 : b.minimumFormatMajorVersion = FormatRangeKeys
956 1 : }
957 1 : if b.index != nil {
958 1 : b.rangeKeys = nil
959 1 : b.rangeKeysSeqNum = 0
960 1 : // Range keys are rare, so we lazily allocate the index for them.
961 1 : if b.rangeKeyIndex == nil {
962 1 : b.rangeKeyIndex = batchskl.NewSkiplist(&b.data, b.cmp, b.abbreviatedKey)
963 1 : }
964 1 : b.deferredOp.index = b.rangeKeyIndex
965 : }
966 : }
967 :
968 : // RangeKeyUnset removes a range key mapping the key range [start, end) at the
969 : // MVCC timestamp suffix. The suffix may be omitted to remove an unsuffixed
970 : // range key. RangeKeyUnset only removes portions of range keys that fall within
971 : // the [start, end) key span, and only range keys with suffixes that exactly
972 : // match the unset suffix.
973 : //
974 : // It is safe to modify the contents of the arguments after RangeKeyUnset
975 : // returns.
976 1 : func (b *Batch) RangeKeyUnset(start, end, suffix []byte, _ *WriteOptions) error {
977 1 : suffixes := [1][]byte{suffix}
978 1 : internalValueLen := rangekey.EncodedUnsetValueLen(end, suffixes[:])
979 1 :
980 1 : deferredOp := b.rangeKeyUnsetDeferred(len(start), internalValueLen)
981 1 : copy(deferredOp.Key, start)
982 1 : n := rangekey.EncodeUnsetValue(deferredOp.Value, end, suffixes[:])
983 1 : if n != internalValueLen {
984 0 : panic("unexpected internal value length mismatch")
985 : }
986 :
987 : // Manually inline DeferredBatchOp.Finish()
988 1 : if deferredOp.index != nil {
989 1 : if err := deferredOp.index.Add(deferredOp.offset); err != nil {
990 0 : return err
991 0 : }
992 : }
993 1 : return nil
994 : }
995 :
996 1 : func (b *Batch) rangeKeyUnsetDeferred(startLen, internalValueLen int) *DeferredBatchOp {
997 1 : b.prepareDeferredKeyValueRecord(startLen, internalValueLen, InternalKeyKindRangeKeyUnset)
998 1 : b.incrementRangeKeysCount()
999 1 : return &b.deferredOp
1000 1 : }
1001 :
1002 : // RangeKeyDelete deletes all of the range keys in the range [start,end)
1003 : // (inclusive on start, exclusive on end). It does not delete point keys (for
1004 : // that use DeleteRange). RangeKeyDelete removes all range keys within the
1005 : // bounds, including those with or without suffixes.
1006 : //
1007 : // It is safe to modify the contents of the arguments after RangeKeyDelete
1008 : // returns.
1009 1 : func (b *Batch) RangeKeyDelete(start, end []byte, _ *WriteOptions) error {
1010 1 : deferredOp := b.RangeKeyDeleteDeferred(len(start), len(end))
1011 1 : copy(deferredOp.Key, start)
1012 1 : copy(deferredOp.Value, end)
1013 1 : // Manually inline DeferredBatchOp.Finish().
1014 1 : if deferredOp.index != nil {
1015 1 : if err := deferredOp.index.Add(deferredOp.offset); err != nil {
1016 0 : return err
1017 0 : }
1018 : }
1019 1 : return nil
1020 : }
1021 :
1022 : // RangeKeyDeleteDeferred is similar to RangeKeyDelete in that it adds an
1023 : // operation to delete range keys to the batch, except it only takes in key
1024 : // lengths instead of complete slices, letting the caller encode into those
1025 : // objects and then call Finish() on the returned object. Note that
1026 : // DeferredBatchOp.Key should be populated with the start key, and
1027 : // DeferredBatchOp.Value should be populated with the end key.
1028 1 : func (b *Batch) RangeKeyDeleteDeferred(startLen, endLen int) *DeferredBatchOp {
1029 1 : b.prepareDeferredKeyValueRecord(startLen, endLen, InternalKeyKindRangeKeyDelete)
1030 1 : b.incrementRangeKeysCount()
1031 1 : return &b.deferredOp
1032 1 : }
1033 :
1034 : // LogData adds the specified data to the batch. The data will be written to the
1035 : // WAL, but not added to memtables or sstables. Log data is never indexed,
1036 : // which makes it useful for testing WAL performance.
1037 : //
1038 : // It is safe to modify the contents of the argument after LogData returns.
1039 1 : func (b *Batch) LogData(data []byte, _ *WriteOptions) error {
1040 1 : origCount, origMemTableSize := b.count, b.memTableSize
1041 1 : b.prepareDeferredKeyRecord(len(data), InternalKeyKindLogData)
1042 1 : copy(b.deferredOp.Key, data)
1043 1 : // Since LogData only writes to the WAL and does not affect the memtable, we
1044 1 : // restore b.count and b.memTableSize to their origin values. Note that
1045 1 : // Batch.count only refers to records that are added to the memtable.
1046 1 : b.count, b.memTableSize = origCount, origMemTableSize
1047 1 : return nil
1048 1 : }
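
// A usage sketch (illustrative): attach opaque metadata to the WAL without
// affecting the memtable or any reads:
//
//	if err := b.LogData([]byte("txn-marker"), nil); err != nil {
//		return err
//	}
//	// The payload is persisted in the WAL but is never visible to Get or
//	// iterators.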
1049 :
1050 : // ingestSST adds the FileNum for an sstable to the batch. The data will only be
1051 : // written to the WAL (not added to memtables or sstables).
1052 1 : func (b *Batch) ingestSST(fileNum base.FileNum) {
1053 1 : if b.Empty() {
1054 1 : b.ingestedSSTBatch = true
1055 1 : } else if !b.ingestedSSTBatch {
1056 0 : // Batch contains other key kinds.
1057 0 : panic("pebble: invalid call to ingestSST")
1058 : }
1059 :
1060 1 : origMemTableSize := b.memTableSize
1061 1 : var buf [binary.MaxVarintLen64]byte
1062 1 : length := binary.PutUvarint(buf[:], uint64(fileNum))
1063 1 : b.prepareDeferredKeyRecord(length, InternalKeyKindIngestSST)
1064 1 : copy(b.deferredOp.Key, buf[:length])
1065 1 : // Since IngestSST writes only to the WAL and does not affect the memtable,
1066 1 : // we restore b.memTableSize to its original value. Note that Batch.count
1067 1 : // is not reset because for the InternalKeyKindIngestSST the count is the
1068 1 : // number of sstable paths which have been added to the batch.
1069 1 : b.memTableSize = origMemTableSize
1070 1 : b.minimumFormatMajorVersion = FormatFlushableIngest
1071 : }
1072 :
1073 : // Empty returns true if the batch is empty, and false otherwise.
1074 1 : func (b *Batch) Empty() bool {
1075 1 : return len(b.data) <= batchHeaderLen
1076 1 : }
1077 :
1078 : // Len returns the current size of the batch in bytes.
1079 1 : func (b *Batch) Len() int {
1080 1 : if len(b.data) <= batchHeaderLen {
1081 1 : return batchHeaderLen
1082 1 : }
1083 1 : return len(b.data)
1084 : }
1085 :
1086 : // Repr returns the underlying batch representation. It is not safe to modify
1087 : // the contents. Reset() will not change the contents of the returned value,
1088 : // though any other mutation operation may do so.
1089 1 : func (b *Batch) Repr() []byte {
1090 1 : if len(b.data) == 0 {
1091 1 : b.init(batchHeaderLen)
1092 1 : }
1093 1 : binary.LittleEndian.PutUint32(b.countData(), b.Count())
1094 1 : return b.data
1095 : }
1096 :
1097 : // SetRepr sets the underlying batch representation. The batch takes ownership
1098 : // of the supplied slice. It is not safe to modify it afterwards until the
1099 : // Batch is no longer in use.
1100 1 : func (b *Batch) SetRepr(data []byte) error {
1101 1 : if len(data) < batchHeaderLen {
1102 0 : return base.CorruptionErrorf("invalid batch")
1103 0 : }
1104 1 : b.data = data
1105 1 : b.count = uint64(binary.LittleEndian.Uint32(b.countData()))
1106 1 : if b.db != nil {
1107 1 : // Only track memTableSize for batches that will be committed to the DB.
1108 1 : b.refreshMemTableSize()
1109 1 : }
1110 1 : return nil
1111 : }
1112 :
1113 : // NewIter returns an iterator that is unpositioned (Iterator.Valid() will
1114 : // return false). The iterator can be positioned via a call to SeekGE,
1115 : // SeekPrefixGE, SeekLT, First or Last. Only indexed batches support iterators.
1116 : //
1117 : // The returned Iterator observes all of the Batch's existing mutations, but no
1118 : // later mutations. Its view can be refreshed via RefreshBatchSnapshot or
1119 : // SetOptions().
1120 1 : func (b *Batch) NewIter(o *IterOptions) (*Iterator, error) {
1121 1 : return b.NewIterWithContext(context.Background(), o)
1122 1 : }
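
// A minimal sketch of iterating the merged batch+DB view, assuming b was
// created with db.NewIndexedBatch:
//
//	iter, err := b.NewIter(nil)
//	if err != nil {
//		return err
//	}
//	for iter.First(); iter.Valid(); iter.Next() {
//		process(iter.Key(), iter.Value()) // process is a hypothetical callback
//	}
//	if err := iter.Close(); err != nil {
//		return err
//	}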
1123 :
1124 : // NewIterWithContext is like NewIter, and additionally accepts a context for
1125 : // tracing.
1126 1 : func (b *Batch) NewIterWithContext(ctx context.Context, o *IterOptions) (*Iterator, error) {
1127 1 : if b.index == nil {
1128 0 : return &Iterator{err: ErrNotIndexed}, nil
1129 0 : }
1130 1 : return b.db.newIter(ctx, b, newIterOpts{}, o), nil
1131 : }
1132 :
1133 : // NewBatchOnlyIter constructs an iterator that only reads the contents of the
1134 : // batch, and does not overlay the batch mutations on top of the DB state.
1135 : //
1136 : // The returned Iterator observes all of the Batch's existing mutations, but
1137 : // no later mutations. Its view can be refreshed via RefreshBatchSnapshot or
1138 : // SetOptions().
1139 1 : func (b *Batch) NewBatchOnlyIter(ctx context.Context, o *IterOptions) (*Iterator, error) {
1140 1 : if b.index == nil {
1141 0 : return &Iterator{err: ErrNotIndexed}, nil
1142 0 : }
1143 1 : return b.db.newIter(ctx, b, newIterOpts{batch: batchIterOpts{batchOnly: true}}, o), nil
1144 : }
1145 :
1146 : // newInternalIter creates a new internalIterator that iterates over the
1147 : // contents of the batch.
1148 1 : func (b *Batch) newInternalIter(o *IterOptions) *batchIter {
1149 1 : iter := &batchIter{}
1150 1 : b.initInternalIter(o, iter)
1151 1 : return iter
1152 1 : }
1153 :
1154 1 : func (b *Batch) initInternalIter(o *IterOptions, iter *batchIter) {
1155 1 : *iter = batchIter{
1156 1 : cmp: b.cmp,
1157 1 : batch: b,
1158 1 : iter: b.index.NewIter(o.GetLowerBound(), o.GetUpperBound()),
1159 1 : // NB: We explicitly do not propagate the batch snapshot to the point
1160 1 : // key iterator. Filtering point keys within the batch iterator can
1161 1 : // cause pathological behavior where a batch iterator advances
1162 1 : // significantly farther than necessary filtering many batch keys that
1163 1 : // are not visible at the batch sequence number. Instead, the merging
1164 1 : // iterator enforces bounds.
1165 1 : //
1166 1 : // For example, consider an engine that contains the committed keys
1167 1 : // 'bar' and 'bax', with no keys between them. Consider a batch
1168 1 : // containing keys 1,000 keys within the range [a,z]. All of the
1169 1 : // batch keys were added to the batch after the iterator was
1170 1 : // constructed, so they are not visible to the iterator. A call to
1171 1 : // SeekGE('bax') would seek the LSM iterators and discover the key
1172 1 : // 'bax'. It would also seek the batch iterator, landing on the key
1173 1 : // 'baz' but discover that it's not visible. The batch iterator would
1174 1 : // next through the rest of the batch's keys, only to discover there are
1175 1 : // no visible keys greater than or equal to 'bax'.
1176 1 : //
1177 1 : // Filtering these batch points within the merging iterator ensures that
1178 1 : // the batch iterator never needs to iterate beyond 'baz', because it
1179 1 : // already found a smaller, visible key 'bax'.
1180 1 : snapshot: base.InternalKeySeqNumMax,
1181 1 : }
1182 1 : }
1183 :
1184 1 : func (b *Batch) newRangeDelIter(o *IterOptions, batchSnapshot uint64) *keyspan.Iter {
1185 1 : // Construct an iterator even if rangeDelIndex is nil, because it is allowed
1186 1 : // to refresh later, so we need the container to exist.
1187 1 : iter := new(keyspan.Iter)
1188 1 : b.initRangeDelIter(o, iter, batchSnapshot)
1189 1 : return iter
1190 1 : }
1191 :
1192 1 : func (b *Batch) initRangeDelIter(_ *IterOptions, iter *keyspan.Iter, batchSnapshot uint64) {
1193 1 : if b.rangeDelIndex == nil {
1194 1 : iter.Init(b.cmp, nil)
1195 1 : return
1196 1 : }
1197 :
1198 : // Fragment the range tombstones the first time a range deletion iterator is
1199 : // requested. The cached tombstones are invalidated if another range
1200 : // deletion tombstone is added to the batch. This cache is only guaranteed
1201 : // to be correct if we're opening an iterator to read at a batch sequence
1202 : // number at least as high as tombstonesSeqNum. The cache is guaranteed to
1203 : // include all tombstones up to tombstonesSeqNum, and if any additional
1204 : // tombstones were added after that sequence number the cache would've been
1205 : // cleared.
1206 1 : nextSeqNum := b.nextSeqNum()
1207 1 : if b.tombstones != nil && b.tombstonesSeqNum <= batchSnapshot {
1208 1 : iter.Init(b.cmp, b.tombstones)
1209 1 : return
1210 1 : }
1211 :
1212 1 : tombstones := make([]keyspan.Span, 0, b.countRangeDels)
1213 1 : frag := &keyspan.Fragmenter{
1214 1 : Cmp: b.cmp,
1215 1 : Format: b.formatKey,
1216 1 : Emit: func(s keyspan.Span) {
1217 1 : tombstones = append(tombstones, s)
1218 1 : },
1219 : }
1220 1 : it := &batchIter{
1221 1 : cmp: b.cmp,
1222 1 : batch: b,
1223 1 : iter: b.rangeDelIndex.NewIter(nil, nil),
1224 1 : snapshot: batchSnapshot,
1225 1 : }
1226 1 : fragmentRangeDels(frag, it, int(b.countRangeDels))
1227 1 : iter.Init(b.cmp, tombstones)
1228 1 :
1229 1 : // If we just read all the tombstones in the batch (eg, batchSnapshot was
1230 1 : // set to b.nextSeqNum()), then cache the tombstones so that a subsequent
1231 1 : // call to initRangeDelIter may use them without refragmenting.
1232 1 : if nextSeqNum == batchSnapshot {
1233 1 : b.tombstones = tombstones
1234 1 : b.tombstonesSeqNum = nextSeqNum
1235 1 : }
1236 : }
1237 :
1238 1 : func fragmentRangeDels(frag *keyspan.Fragmenter, it internalIterator, count int) {
1239 1 : // The memory management here is a bit subtle. The keys and values returned
1240 1 : // by the iterator are slices in Batch.data. Thus the fragmented tombstones
1241 1 : // are slices within Batch.data. If additional entries are added to the
1242 1 : // Batch, Batch.data may be reallocated. The references in the fragmented
1243 1 : // tombstones will remain valid, pointing into the old Batch.data. GC for
1244 1 : // the win.
1245 1 :
1246 1 : // Use a single []keyspan.Key buffer to avoid allocating many
1247 1 : // individual []keyspan.Key slices with a single element each.
1248 1 : keyBuf := make([]keyspan.Key, 0, count)
1249 1 : for key, val := it.First(); key != nil; key, val = it.Next() {
1250 1 : s := rangedel.Decode(*key, val.InPlaceValue(), keyBuf)
1251 1 : keyBuf = s.Keys[len(s.Keys):]
1252 1 :
1253 1 : // Set a fixed capacity to avoid accidental overwriting.
1254 1 : s.Keys = s.Keys[:len(s.Keys):len(s.Keys)]
1255 1 : frag.Add(s)
1256 1 : }
1257 1 : frag.Finish()
1258 : }
1259 :
1260 1 : func (b *Batch) newRangeKeyIter(o *IterOptions, batchSnapshot uint64) *keyspan.Iter {
1261 1 : // Construct an iterator even if rangeKeyIndex is nil, because it is allowed
1262 1 : // to refresh later, so we need the container to exist.
1263 1 : iter := new(keyspan.Iter)
1264 1 : b.initRangeKeyIter(o, iter, batchSnapshot)
1265 1 : return iter
1266 1 : }
1267 :
1268 1 : func (b *Batch) initRangeKeyIter(_ *IterOptions, iter *keyspan.Iter, batchSnapshot uint64) {
1269 1 : if b.rangeKeyIndex == nil {
1270 1 : iter.Init(b.cmp, nil)
1271 1 : return
1272 1 : }
1273 :
1274 : // Fragment the range keys the first time a range key iterator is requested.
1275 : // The cached spans are invalidated if another range key is added to the
1276 : // batch. This cache is only guaranteed to be correct if we're opening an
1277 : // iterator to read at a batch sequence number at least as high as
1278 : // rangeKeysSeqNum. The cache is guaranteed to include all range keys up to
1279 : // rangeKeysSeqNum, and if any additional range keys were added after that
1280 : // sequence number the cache would've been cleared.
1281 1 : nextSeqNum := b.nextSeqNum()
1282 1 : if b.rangeKeys != nil && b.rangeKeysSeqNum <= batchSnapshot {
1283 1 : iter.Init(b.cmp, b.rangeKeys)
1284 1 : return
1285 1 : }
1286 :
1287 1 : rangeKeys := make([]keyspan.Span, 0, b.countRangeKeys)
1288 1 : frag := &keyspan.Fragmenter{
1289 1 : Cmp: b.cmp,
1290 1 : Format: b.formatKey,
1291 1 : Emit: func(s keyspan.Span) {
1292 1 : rangeKeys = append(rangeKeys, s)
1293 1 : },
1294 : }
1295 1 : it := &batchIter{
1296 1 : cmp: b.cmp,
1297 1 : batch: b,
1298 1 : iter: b.rangeKeyIndex.NewIter(nil, nil),
1299 1 : snapshot: batchSnapshot,
1300 1 : }
1301 1 : fragmentRangeKeys(frag, it, int(b.countRangeKeys))
1302 1 : iter.Init(b.cmp, rangeKeys)
1303 1 :
1304 1 : // If we just read all the range keys in the batch (eg, batchSnapshot was
1305 1 : // set to b.nextSeqNum()), then cache the range keys so that a subsequent
1306 1 : // call to initRangeKeyIter may use them without refragmenting.
1307 1 : if nextSeqNum == batchSnapshot {
1308 1 : b.rangeKeys = rangeKeys
1309 1 : b.rangeKeysSeqNum = nextSeqNum
1310 1 : }
1311 : }
1312 :
1313 1 : func fragmentRangeKeys(frag *keyspan.Fragmenter, it internalIterator, count int) error {
1314 1 : // The memory management here is a bit subtle. The keys and values
1315 1 : // returned by the iterator are slices in Batch.data. Thus the
1316 1 : // fragmented key spans are slices within Batch.data. If additional
1317 1 : // entries are added to the Batch, Batch.data may be reallocated. The
1318 1 : // references in the fragmented keys will remain valid, pointing into
1319 1 : // the old Batch.data. GC for the win.
1320 1 :
1321 1 : // Use a single []keyspan.Key buffer to avoid allocating many
1322 1 : // individual []keyspan.Key slices with a single element each.
1323 1 : keyBuf := make([]keyspan.Key, 0, count)
1324 1 : for ik, val := it.First(); ik != nil; ik, val = it.Next() {
1325 1 : s, err := rangekey.Decode(*ik, val.InPlaceValue(), keyBuf)
1326 1 : if err != nil {
1327 0 : return err
1328 0 : }
1329 1 : keyBuf = s.Keys[len(s.Keys):]
1330 1 :
1331 1 : // Set a fixed capacity to avoid accidental overwriting.
1332 1 : s.Keys = s.Keys[:len(s.Keys):len(s.Keys)]
1333 1 : frag.Add(s)
1334 : }
1335 1 : frag.Finish()
1336 1 : return nil
1337 : }
1338 :
1339 : // Commit applies the batch to its parent writer.
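     : //
     : // A minimal sketch of typical usage (db is an assumed open *DB):
     : //
     : //    b := db.NewBatch()
     : //    _ = b.Set([]byte("k"), []byte("v"), nil)
     : //    if err := b.Commit(Sync); err != nil {
     : //        // handle the commit error
     : //    }
     : //    _ = b.Close()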
1340 1 : func (b *Batch) Commit(o *WriteOptions) error {
1341 1 : return b.db.Apply(b, o)
1342 1 : }
1343 :
1344 : // Close closes the batch without committing it.
1345 1 : func (b *Batch) Close() error {
1346 1 : b.release()
1347 1 : return nil
1348 1 : }
1349 :
1350 : // Indexed returns true if the batch is indexed (i.e. supports read
1351 : // operations).
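     : //
     : // Batches created with DB.NewBatch are not indexed, while batches created
     : // with DB.NewIndexedBatch support Get and NewIter.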
1352 1 : func (b *Batch) Indexed() bool {
1353 1 : return b.index != nil
1354 1 : }
1355 :
1356 : // init ensures that the batch data slice is initialized to meet the
1357 : // minimum required size and allocates space for the batch header.
1358 1 : func (b *Batch) init(size int) {
1359 1 : n := batchInitialSize
1360 1 : for n < size {
1361 1 : n *= 2
1362 1 : }
1363 1 : if cap(b.data) < n {
1364 1 : b.data = rawalloc.New(batchHeaderLen, n)
1365 1 : }
1366 1 : b.data = b.data[:batchHeaderLen]
1367 1 : clear(b.data) // Zero the header (sequence number and count)
1368 : }
1369 :
1370 : // Reset resets the batch for reuse. The underlying byte slice (that is
1371 : // returned by Repr()) may not be modified. It is only necessary to call this
1372 : // method if a batch is explicitly being reused. Close automatically takes care
1373 : // of releasing resources when appropriate for batches that are internally
1374 : // being reused.
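     : //
     : // A reuse sketch (db and kvs are assumed, hypothetical names):
     : //
     : //    b := db.NewBatch()
     : //    for _, kv := range kvs {
     : //        b.Reset()
     : //        _ = b.Set(kv.key, kv.value, nil)
     : //        if err := b.Commit(Sync); err != nil {
     : //            // handle the commit error
     : //        }
     : //    }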
1375 1 : func (b *Batch) Reset() {
1376 1 : // Zero out the struct, retaining only the fields necessary for manual
1377 1 : // reuse.
1378 1 : b.batchInternal = batchInternal{
1379 1 : data: b.data,
1380 1 : cmp: b.cmp,
1381 1 : formatKey: b.formatKey,
1382 1 : abbreviatedKey: b.abbreviatedKey,
1383 1 : index: b.index,
1384 1 : db: b.db,
1385 1 : }
1386 1 : b.applied.Store(false)
1387 1 : if b.data != nil {
1388 1 : if cap(b.data) > batchMaxRetainedSize {
1389 1 : // If the capacity of the buffer is larger than our maximum
1390 1 : // retention size, don't re-use it. Let it be GC-ed instead.
1391 1 : // This prevents the memory from an unusually large batch from
1392 1 : // being held on to indefinitely.
1393 1 : b.data = nil
1394 1 : } else {
1395 1 : // Otherwise, reset the buffer for re-use.
1396 1 : b.data = b.data[:batchHeaderLen]
1397 1 : clear(b.data)
1398 1 : }
1399 : }
1400 1 : if b.index != nil {
1401 1 : b.index.Init(&b.data, b.cmp, b.abbreviatedKey)
1402 1 : }
1403 : }
1404 :
1405 : // seqNumData returns the 8-byte little-endian sequence number. Zero means that
1406 : // the batch has not yet been applied.
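     : //
     : // The 12-byte batch header (batchHeaderLen) is laid out as:
     : //
     : //    offset 0-7:  little-endian sequence number
     : //    offset 8-11: little-endian entry count (batchCountOffset = 8)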
1407 1 : func (b *Batch) seqNumData() []byte {
1408 1 : return b.data[:8]
1409 1 : }
1410 :
1411 : // countData returns the 4-byte little-endian count data. "\xff\xff\xff\xff"
1412 : // means that the batch is invalid.
1413 1 : func (b *Batch) countData() []byte {
1414 1 : return b.data[8:12]
1415 1 : }
1416 :
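     : // grow ensures b.data has capacity for n additional bytes, doubling the
     : // capacity until the new length fits, and panics with ErrBatchTooLarge if
     : // the new size would reach maxBatchSize. For example, appending 600 bytes
     : // to a 1 KB buffer already holding 900 bytes reallocates to 2 KB.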
1417 1 : func (b *Batch) grow(n int) {
1418 1 : newSize := len(b.data) + n
1419 1 : if uint64(newSize) >= maxBatchSize {
1420 1 : panic(ErrBatchTooLarge)
1421 : }
1422 1 : if newSize > cap(b.data) {
1423 1 : newCap := 2 * cap(b.data)
1424 1 : for newCap < newSize {
1425 1 : newCap *= 2
1426 1 : }
1427 1 : newData := rawalloc.New(len(b.data), newCap)
1428 1 : copy(newData, b.data)
1429 1 : b.data = newData
1430 : }
1431 1 : b.data = b.data[:newSize]
1432 : }
1433 :
1434 1 : func (b *Batch) setSeqNum(seqNum uint64) {
1435 1 : binary.LittleEndian.PutUint64(b.seqNumData(), seqNum)
1436 1 : }
1437 :
1438 : // SeqNum returns the batch sequence number which is applied to the first
1439 : // record in the batch. The sequence number is incremented for each subsequent
1440 : // record. It returns zero if the batch is empty.
1441 1 : func (b *Batch) SeqNum() uint64 {
1442 1 : if len(b.data) == 0 {
1443 1 : b.init(batchHeaderLen)
1444 1 : }
1445 1 : return binary.LittleEndian.Uint64(b.seqNumData())
1446 : }
1447 :
1448 1 : func (b *Batch) setCount(v uint32) {
1449 1 : b.count = uint64(v)
1450 1 : }
1451 :
1452 : // Count returns the count of memtable-modifying operations in this batch. All
1453 : // operations except LogData increment this count. For IngestSSTs, count only
1454 : // indicates the number of SSTs ingested in the record; the batch isn't
1455 : // applied to the memtable. Count panics if the count overflows a uint32.
1456 1 : func (b *Batch) Count() uint32 {
1457 1 : if b.count > math.MaxUint32 {
1458 1 : panic(ErrInvalidBatch)
1459 : }
1460 1 : return uint32(b.count)
1461 : }
1462 :
1463 : // Reader returns a BatchReader for the current batch contents. If the batch is
1464 : // mutated, the new entries will not be visible to the reader.
1465 1 : func (b *Batch) Reader() BatchReader {
1466 1 : if len(b.data) == 0 {
1467 1 : b.init(batchHeaderLen)
1468 1 : }
1469 1 : return b.data[batchHeaderLen:]
1470 : }
1471 :
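     : // batchDecodeStr decodes a varint-prefixed string from data, returning the
     : // remaining data and the decoded string. It is a manually unrolled
     : // equivalent of binary.Uvarint restricted to 32-bit lengths. For example,
     : // the bytes {0x03, 'f', 'o', 'o'} decode to s = "foo" with no data left
     : // over; ok is false if the encoded length exceeds the remaining data.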
1472 1 : func batchDecodeStr(data []byte) (odata []byte, s []byte, ok bool) {
1473 1 : var v uint32
1474 1 : var n int
1475 1 : ptr := unsafe.Pointer(&data[0])
1476 1 : if a := *((*uint8)(ptr)); a < 128 {
1477 1 : v = uint32(a)
1478 1 : n = 1
1479 1 : } else if a, b := a&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 1))); b < 128 {
1480 1 : v = uint32(b)<<7 | uint32(a)
1481 1 : n = 2
1482 1 : } else if b, c := b&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 2))); c < 128 {
1483 1 : v = uint32(c)<<14 | uint32(b)<<7 | uint32(a)
1484 1 : n = 3
1485 1 : } else if c, d := c&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 3))); d < 128 {
1486 1 : v = uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
1487 1 : n = 4
1488 1 : } else {
1489 0 : d, e := d&0x7f, *((*uint8)(unsafe.Pointer(uintptr(ptr) + 4)))
1490 0 : v = uint32(e)<<28 | uint32(d)<<21 | uint32(c)<<14 | uint32(b)<<7 | uint32(a)
1491 0 : n = 5
1492 0 : }
1493 :
1494 1 : data = data[n:]
1495 1 : if v > uint32(len(data)) {
1496 0 : return nil, nil, false
1497 0 : }
1498 1 : return data[v:], data[:v], true
1499 : }
1500 :
1501 : // SyncWait is to be used in conjunction with DB.ApplyNoSyncWait.
1502 1 : func (b *Batch) SyncWait() error {
1503 1 : now := time.Now()
1504 1 : b.fsyncWait.Wait()
1505 1 : if b.commitErr != nil {
1506 0 : b.db = nil // prevent batch reuse on error
1507 0 : }
1508 1 : waitDuration := time.Since(now)
1509 1 : b.commitStats.CommitWaitDuration += waitDuration
1510 1 : b.commitStats.TotalDuration += waitDuration
1511 1 : return b.commitErr
1512 : }
1513 :
1514 : // CommitStats returns stats related to committing the batch. Should be called
1515 : // after Batch.Commit, DB.Apply. If DB.ApplyNoSyncWait is used, should be
1516 : // called after Batch.SyncWait.
1517 1 : func (b *Batch) CommitStats() BatchCommitStats {
1518 1 : return b.commitStats
1519 1 : }
1520 :
1521 : // BatchReader iterates over the entries contained in a batch.
1522 : type BatchReader []byte
1523 :
1524 : // ReadBatch constructs a BatchReader from a batch representation. The
1525 : // header is not validated. ReadBatch returns a new batch reader and the
1526 : // count of entries contained within the batch.
1527 1 : func ReadBatch(repr []byte) (r BatchReader, count uint32) {
1528 1 : if len(repr) <= batchHeaderLen {
1529 1 : return nil, count
1530 1 : }
1531 1 : count = binary.LittleEndian.Uint32(repr[batchCountOffset:batchHeaderLen])
1532 1 : return repr[batchHeaderLen:], count
1533 : }
1534 :
1535 : // Next returns the next entry in this batch. The final return value is false
1536 : // when the batch is exhausted or corrupt; the end is reached when len(r)==0.
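     : //
     : // A minimal draining sketch (b is an assumed *Batch):
     : //
     : //    r := b.Reader()
     : //    for {
     : //        kind, ukey, value, ok := r.Next()
     : //        if !ok {
     : //            break
     : //        }
     : //        fmt.Printf("%s %q %q\n", kind, ukey, value)
     : //    }
     : //
     : // After the loop, len(r) == 0 distinguishes normal exhaustion from
     : // corruption.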
1537 1 : func (r *BatchReader) Next() (kind InternalKeyKind, ukey []byte, value []byte, ok bool) {
1538 1 : if len(*r) == 0 {
1539 1 : return 0, nil, nil, false
1540 1 : }
1541 1 : kind = InternalKeyKind((*r)[0])
1542 1 : if kind > InternalKeyKindMax {
1543 0 : return 0, nil, nil, false
1544 0 : }
1545 1 : *r, ukey, ok = batchDecodeStr((*r)[1:])
1546 1 : if !ok {
1547 0 : return 0, nil, nil, false
1548 0 : }
1549 1 : switch kind {
1550 : case InternalKeyKindSet, InternalKeyKindMerge, InternalKeyKindRangeDelete,
1551 : InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete,
1552 1 : InternalKeyKindDeleteSized:
1553 1 : *r, value, ok = batchDecodeStr(*r)
1554 1 : if !ok {
1555 0 : return 0, nil, nil, false
1556 0 : }
1557 : }
1558 1 : return kind, ukey, value, true
1559 : }
1560 :
1561 : // Note: batchIter mirrors the implementation of flushableBatchIter. Keep the
1562 : // two in sync.
1563 : type batchIter struct {
1564 : cmp Compare
1565 : batch *Batch
1566 : iter batchskl.Iterator
1567 : err error
1568 : // snapshot holds a batch "sequence number" at which the batch is being
1569 : // read. This sequence number has the InternalKeySeqNumBatch bit set, so it
1570 : // encodes an offset within the batch. Only batch entries earlier than the
1571 : // offset are visible during iteration.
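     : // For example, a snapshot of base.InternalKeySeqNumBatch|42 makes only the
     : // entries encoded before batch offset 42 visible.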
1572 : snapshot uint64
1573 : }
1574 :
1575 : // batchIter implements the base.InternalIterator interface.
1576 : var _ base.InternalIterator = (*batchIter)(nil)
1577 :
1578 0 : func (i *batchIter) String() string {
1579 0 : return "batch"
1580 0 : }
1581 :
1582 1 : func (i *batchIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, base.LazyValue) {
1583 1 : // Ignore TrySeekUsingNext if the view of the batch changed.
1584 1 : if flags.TrySeekUsingNext() && flags.BatchJustRefreshed() {
1585 1 : flags = flags.DisableTrySeekUsingNext()
1586 1 : }
1587 :
1588 1 : i.err = nil // clear cached iteration error
1589 1 : ikey := i.iter.SeekGE(key, flags)
1590 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1591 0 : ikey = i.iter.Next()
1592 0 : }
1593 1 : if ikey == nil {
1594 1 : return nil, base.LazyValue{}
1595 1 : }
1596 1 : return ikey, base.MakeInPlaceValue(i.value())
1597 : }
1598 :
1599 : func (i *batchIter) SeekPrefixGE(
1600 : prefix, key []byte, flags base.SeekGEFlags,
1601 1 : ) (*base.InternalKey, base.LazyValue) {
1602 1 : i.err = nil // clear cached iteration error
1603 1 : return i.SeekGE(key, flags)
1604 1 : }
1605 :
1606 1 : func (i *batchIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, base.LazyValue) {
1607 1 : i.err = nil // clear cached iteration error
1608 1 : ikey := i.iter.SeekLT(key)
1609 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1610 0 : ikey = i.iter.Prev()
1611 0 : }
1612 1 : if ikey == nil {
1613 1 : return nil, base.LazyValue{}
1614 1 : }
1615 1 : return ikey, base.MakeInPlaceValue(i.value())
1616 : }
1617 :
1618 1 : func (i *batchIter) First() (*InternalKey, base.LazyValue) {
1619 1 : i.err = nil // clear cached iteration error
1620 1 : ikey := i.iter.First()
1621 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1622 1 : ikey = i.iter.Next()
1623 1 : }
1624 1 : if ikey == nil {
1625 1 : return nil, base.LazyValue{}
1626 1 : }
1627 1 : return ikey, base.MakeInPlaceValue(i.value())
1628 : }
1629 :
1630 1 : func (i *batchIter) Last() (*InternalKey, base.LazyValue) {
1631 1 : i.err = nil // clear cached iteration error
1632 1 : ikey := i.iter.Last()
1633 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1634 0 : ikey = i.iter.Prev()
1635 0 : }
1636 1 : if ikey == nil {
1637 0 : return nil, base.LazyValue{}
1638 0 : }
1639 1 : return ikey, base.MakeInPlaceValue(i.value())
1640 : }
1641 :
1642 1 : func (i *batchIter) Next() (*InternalKey, base.LazyValue) {
1643 1 : ikey := i.iter.Next()
1644 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1645 1 : ikey = i.iter.Next()
1646 1 : }
1647 1 : if ikey == nil {
1648 1 : return nil, base.LazyValue{}
1649 1 : }
1650 1 : return ikey, base.MakeInPlaceValue(i.value())
1651 : }
1652 :
1653 0 : func (i *batchIter) NextPrefix(succKey []byte) (*InternalKey, LazyValue) {
1654 0 : // Because NextPrefix was invoked, `succKey` must be ≥ the key at i's current
1655 0 : // position. Seek the arena iterator using TrySeekUsingNext.
1656 0 : ikey := i.iter.SeekGE(succKey, base.SeekGEFlagsNone.EnableTrySeekUsingNext())
1657 0 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1658 0 : ikey = i.iter.Next()
1659 0 : }
1660 0 : if ikey == nil {
1661 0 : return nil, base.LazyValue{}
1662 0 : }
1663 0 : return ikey, base.MakeInPlaceValue(i.value())
1664 : }
1665 :
1666 1 : func (i *batchIter) Prev() (*InternalKey, base.LazyValue) {
1667 1 : ikey := i.iter.Prev()
1668 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1669 0 : ikey = i.iter.Prev()
1670 0 : }
1671 1 : if ikey == nil {
1672 1 : return nil, base.LazyValue{}
1673 1 : }
1674 1 : return ikey, base.MakeInPlaceValue(i.value())
1675 : }
1676 :
1677 1 : func (i *batchIter) value() []byte {
1678 1 : offset, _, keyEnd := i.iter.KeyInfo()
1679 1 : data := i.batch.data
1680 1 : if len(data[offset:]) == 0 {
1681 0 : i.err = base.CorruptionErrorf("corrupted batch")
1682 0 : return nil
1683 0 : }
1684 :
1685 1 : switch InternalKeyKind(data[offset]) {
1686 : case InternalKeyKindSet, InternalKeyKindMerge, InternalKeyKindRangeDelete,
1687 : InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete,
1688 1 : InternalKeyKindDeleteSized:
1689 1 : _, value, ok := batchDecodeStr(data[keyEnd:])
1690 1 : if !ok {
1691 0 : return nil
1692 0 : }
1693 1 : return value
1694 1 : default:
1695 1 : return nil
1696 : }
1697 : }
1698 :
1699 1 : func (i *batchIter) Error() error {
1700 1 : return i.err
1701 1 : }
1702 :
1703 1 : func (i *batchIter) Close() error {
1704 1 : _ = i.iter.Close()
1705 1 : return i.err
1706 1 : }
1707 :
1708 0 : func (i *batchIter) SetBounds(lower, upper []byte) {
1709 0 : i.iter.SetBounds(lower, upper)
1710 0 : }
1711 :
1712 0 : func (i *batchIter) SetContext(_ context.Context) {}
1713 :
1714 : type flushableBatchEntry struct {
1715 : // offset is the byte offset of the record within the batch repr.
1716 : offset uint32
1717 : // index is the 0-based ordinal number of the record within the batch. Used
1718 : // to compute the seqnum for the record.
1719 : index uint32
1720 : // key{Start,End} are the start and end byte offsets of the key within the
1721 : // batch repr. Cached to avoid decoding the key length on every
1722 : // comparison. The value is stored starting at keyEnd.
1723 : keyStart uint32
1724 : keyEnd uint32
1725 : }
1726 :
1727 : // flushableBatch wraps an existing batch and provides the interfaces needed
1728 : // for making the batch flushable (i.e. able to mimic a memtable).
1729 : type flushableBatch struct {
1730 : cmp Compare
1731 : formatKey base.FormatKey
1732 : data []byte
1733 :
1734 : // The base sequence number for the entries in the batch. This is the same
1735 : // value as Batch.seqNum() and is cached here for performance.
1736 : seqNum uint64
1737 :
1738 : // A slice of offsets and indices for the entries in the batch. Used to
1739 : // implement flushableBatchIter. Unlike the indexing on a normal batch, a
1740 : // flushable batch is indexed such that batch entry i will be given the
1741 : // sequence number flushableBatch.seqNum+i.
1742 : //
1743 : // Sorted in increasing order of key and decreasing order of offset (since
1744 : // higher offsets correspond to higher sequence numbers).
1745 : //
1746 : // Does not include range deletion entries or range key entries.
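     : //
     : // For example, with seqNum = 100 the entry with index 2 is surfaced with
     : // sequence number 102.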
1747 : offsets []flushableBatchEntry
1748 :
1749 : // Fragmented range deletion tombstones.
1750 : tombstones []keyspan.Span
1751 :
1752 : // Fragmented range keys.
1753 : rangeKeys []keyspan.Span
1754 : }
1755 :
1756 : var _ flushable = (*flushableBatch)(nil)
1757 :
1758 : // newFlushableBatch creates a new batch that implements the flushable
1759 : // interface. This allows the batch to act like a memtable and be placed in the
1760 : // queue of flushable memtables. Note that the flushable batch takes ownership
1761 : // of the batch data.
1762 1 : func newFlushableBatch(batch *Batch, comparer *Comparer) *flushableBatch {
1763 1 : b := &flushableBatch{
1764 1 : data: batch.data,
1765 1 : cmp: comparer.Compare,
1766 1 : formatKey: comparer.FormatKey,
1767 1 : offsets: make([]flushableBatchEntry, 0, batch.Count()),
1768 1 : }
1769 1 : if b.data != nil {
1770 1 : // Note that this sequence number is not correct when this batch has not
1771 1 : // been applied since the sequence number has not been assigned yet. The
1772 1 : // correct sequence number will be set later. But it is correct when the
1773 1 : // batch is being replayed from the WAL.
1774 1 : b.seqNum = batch.SeqNum()
1775 1 : }
1776 1 : var rangeDelOffsets []flushableBatchEntry
1777 1 : var rangeKeyOffsets []flushableBatchEntry
1778 1 : if len(b.data) > batchHeaderLen {
1779 1 : // Non-empty batch.
1780 1 : var index uint32
1781 1 : for iter := BatchReader(b.data[batchHeaderLen:]); len(iter) > 0; index++ {
1782 1 : offset := uintptr(unsafe.Pointer(&iter[0])) - uintptr(unsafe.Pointer(&b.data[0]))
1783 1 : kind, key, _, ok := iter.Next()
1784 1 : if !ok {
1785 0 : break
1786 : }
1787 1 : entry := flushableBatchEntry{
1788 1 : offset: uint32(offset),
1789 1 : index: uint32(index),
1790 1 : }
1791 1 : if keySize := uint32(len(key)); keySize == 0 {
1792 1 : // Must add 2 to the offset. One byte encodes `kind` and the next
1793 1 : // byte encodes `0`, which is the length of the key.
1794 1 : entry.keyStart = uint32(offset) + 2
1795 1 : entry.keyEnd = entry.keyStart
1796 1 : } else {
1797 1 : entry.keyStart = uint32(uintptr(unsafe.Pointer(&key[0])) -
1798 1 : uintptr(unsafe.Pointer(&b.data[0])))
1799 1 : entry.keyEnd = entry.keyStart + keySize
1800 1 : }
1801 1 : switch kind {
1802 1 : case InternalKeyKindRangeDelete:
1803 1 : rangeDelOffsets = append(rangeDelOffsets, entry)
1804 1 : case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
1805 1 : rangeKeyOffsets = append(rangeKeyOffsets, entry)
1806 1 : default:
1807 1 : b.offsets = append(b.offsets, entry)
1808 : }
1809 : }
1810 : }
1811 :
1812 : // Sort offsets, rangeDelOffsets and rangeKeyOffsets by pointing b.offsets at
1813 : // each slice in turn, reusing *flushableBatch's sort.Interface implementation.
1814 1 : pointOffsets := b.offsets
1815 1 : sort.Sort(b)
1816 1 : b.offsets = rangeDelOffsets
1817 1 : sort.Sort(b)
1818 1 : b.offsets = rangeKeyOffsets
1819 1 : sort.Sort(b)
1820 1 : b.offsets = pointOffsets
1821 1 :
1822 1 : if len(rangeDelOffsets) > 0 {
1823 1 : frag := &keyspan.Fragmenter{
1824 1 : Cmp: b.cmp,
1825 1 : Format: b.formatKey,
1826 1 : Emit: func(s keyspan.Span) {
1827 1 : b.tombstones = append(b.tombstones, s)
1828 1 : },
1829 : }
1830 1 : it := &flushableBatchIter{
1831 1 : batch: b,
1832 1 : data: b.data,
1833 1 : offsets: rangeDelOffsets,
1834 1 : cmp: b.cmp,
1835 1 : index: -1,
1836 1 : }
1837 1 : fragmentRangeDels(frag, it, len(rangeDelOffsets))
1838 : }
1839 1 : if len(rangeKeyOffsets) > 0 {
1840 1 : frag := &keyspan.Fragmenter{
1841 1 : Cmp: b.cmp,
1842 1 : Format: b.formatKey,
1843 1 : Emit: func(s keyspan.Span) {
1844 1 : b.rangeKeys = append(b.rangeKeys, s)
1845 1 : },
1846 : }
1847 1 : it := &flushableBatchIter{
1848 1 : batch: b,
1849 1 : data: b.data,
1850 1 : offsets: rangeKeyOffsets,
1851 1 : cmp: b.cmp,
1852 1 : index: -1,
1853 1 : }
1854 1 : fragmentRangeKeys(frag, it, len(rangeKeyOffsets))
1855 : }
1856 1 : return b
1857 : }
1858 :
1859 1 : func (b *flushableBatch) setSeqNum(seqNum uint64) {
1860 1 : if b.seqNum != 0 {
1861 0 : panic(fmt.Sprintf("pebble: flushableBatch.seqNum already set: %d", b.seqNum))
1862 : }
1863 1 : b.seqNum = seqNum
1864 1 : for i := range b.tombstones {
1865 1 : for j := range b.tombstones[i].Keys {
1866 1 : b.tombstones[i].Keys[j].Trailer = base.MakeTrailer(
1867 1 : b.tombstones[i].Keys[j].SeqNum()+seqNum,
1868 1 : b.tombstones[i].Keys[j].Kind(),
1869 1 : )
1870 1 : }
1871 : }
1872 1 : for i := range b.rangeKeys {
1873 1 : for j := range b.rangeKeys[i].Keys {
1874 1 : b.rangeKeys[i].Keys[j].Trailer = base.MakeTrailer(
1875 1 : b.rangeKeys[i].Keys[j].SeqNum()+seqNum,
1876 1 : b.rangeKeys[i].Keys[j].Kind(),
1877 1 : )
1878 1 : }
1879 : }
1880 : }
1881 :
1882 1 : func (b *flushableBatch) Len() int {
1883 1 : return len(b.offsets)
1884 1 : }
1885 :
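     : // Less orders entries by key, breaking ties so that the entry with the
     : // higher offset (and therefore the higher sequence number) sorts first.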
1886 1 : func (b *flushableBatch) Less(i, j int) bool {
1887 1 : ei := &b.offsets[i]
1888 1 : ej := &b.offsets[j]
1889 1 : ki := b.data[ei.keyStart:ei.keyEnd]
1890 1 : kj := b.data[ej.keyStart:ej.keyEnd]
1891 1 : switch c := b.cmp(ki, kj); {
1892 1 : case c < 0:
1893 1 : return true
1894 1 : case c > 0:
1895 1 : return false
1896 1 : default:
1897 1 : return ei.offset > ej.offset
1898 : }
1899 : }
1900 :
1901 1 : func (b *flushableBatch) Swap(i, j int) {
1902 1 : b.offsets[i], b.offsets[j] = b.offsets[j], b.offsets[i]
1903 1 : }
1904 :
1905 : // newIter is part of the flushable interface.
1906 1 : func (b *flushableBatch) newIter(o *IterOptions) internalIterator {
1907 1 : return &flushableBatchIter{
1908 1 : batch: b,
1909 1 : data: b.data,
1910 1 : offsets: b.offsets,
1911 1 : cmp: b.cmp,
1912 1 : index: -1,
1913 1 : lower: o.GetLowerBound(),
1914 1 : upper: o.GetUpperBound(),
1915 1 : }
1916 1 : }
1917 :
1918 : // newFlushIter is part of the flushable interface.
1919 1 : func (b *flushableBatch) newFlushIter(o *IterOptions, bytesFlushed *uint64) internalIterator {
1920 1 : return &flushFlushableBatchIter{
1921 1 : flushableBatchIter: flushableBatchIter{
1922 1 : batch: b,
1923 1 : data: b.data,
1924 1 : offsets: b.offsets,
1925 1 : cmp: b.cmp,
1926 1 : index: -1,
1927 1 : },
1928 1 : bytesIterated: bytesFlushed,
1929 1 : }
1930 1 : }
1931 :
1932 : // newRangeDelIter is part of the flushable interface.
1933 1 : func (b *flushableBatch) newRangeDelIter(o *IterOptions) keyspan.FragmentIterator {
1934 1 : if len(b.tombstones) == 0 {
1935 1 : return nil
1936 1 : }
1937 1 : return keyspan.NewIter(b.cmp, b.tombstones)
1938 : }
1939 :
1940 : // newRangeKeyIter is part of the flushable interface.
1941 1 : func (b *flushableBatch) newRangeKeyIter(o *IterOptions) keyspan.FragmentIterator {
1942 1 : if len(b.rangeKeys) == 0 {
1943 1 : return nil
1944 1 : }
1945 1 : return keyspan.NewIter(b.cmp, b.rangeKeys)
1946 : }
1947 :
1948 : // containsRangeKeys is part of the flushable interface.
1949 0 : func (b *flushableBatch) containsRangeKeys() bool { return len(b.rangeKeys) > 0 }
1950 :
1951 : // inuseBytes is part of the flushable interface.
1952 1 : func (b *flushableBatch) inuseBytes() uint64 {
1953 1 : return uint64(len(b.data) - batchHeaderLen)
1954 1 : }
1955 :
1956 : // totalBytes is part of the flushable interface.
1957 1 : func (b *flushableBatch) totalBytes() uint64 {
1958 1 : return uint64(cap(b.data))
1959 1 : }
1960 :
1961 : // readyForFlush is part of the flushable interface.
1962 1 : func (b *flushableBatch) readyForFlush() bool {
1963 1 : // A flushable batch is always ready for flush; it must be flushed together
1964 1 : // with the previous memtable.
1965 1 : return true
1966 1 : }
1967 :
1968 : // Note: flushableBatchIter mirrors the implementation of batchIter. Keep the
1969 : // two in sync.
1970 : type flushableBatchIter struct {
1971 : // Members to be initialized by creator.
1972 : batch *flushableBatch
1973 : // The bytes backing the batch. Always the same as batch.data?
1974 : data []byte
1975 : // The sorted entries. This is not always equal to batch.offsets.
1976 : offsets []flushableBatchEntry
1977 : cmp Compare
1978 : // Must be initialized to -1. It is the index into offsets that represents
1979 : // the current iterator position.
1980 : index int
1981 :
1982 : // For internal use by the implementation.
1983 : key InternalKey
1984 : err error
1985 :
1986 : // Optionally initialize to bounds of iteration, if any.
1987 : lower []byte
1988 : upper []byte
1989 : }
1990 :
1991 : // flushableBatchIter implements the base.InternalIterator interface.
1992 : var _ base.InternalIterator = (*flushableBatchIter)(nil)
1993 :
1994 0 : func (i *flushableBatchIter) String() string {
1995 0 : return "flushable-batch"
1996 0 : }
1997 :
1998 : // SeekGE implements internalIterator.SeekGE, as documented in the pebble
1999 : // package. Ignore flags.TrySeekUsingNext() since we don't expect this
2000 : // optimization to provide much benefit here at the moment.
2001 : func (i *flushableBatchIter) SeekGE(
2002 : key []byte, flags base.SeekGEFlags,
2003 1 : ) (*InternalKey, base.LazyValue) {
2004 1 : i.err = nil // clear cached iteration error
2005 1 : ikey := base.MakeSearchKey(key)
2006 1 : i.index = sort.Search(len(i.offsets), func(j int) bool {
2007 1 : return base.InternalCompare(i.cmp, ikey, i.getKey(j)) <= 0
2008 1 : })
2009 1 : if i.index >= len(i.offsets) {
2010 1 : return nil, base.LazyValue{}
2011 1 : }
2012 1 : i.key = i.getKey(i.index)
2013 1 : if i.upper != nil && i.cmp(i.key.UserKey, i.upper) >= 0 {
2014 1 : i.index = len(i.offsets)
2015 1 : return nil, base.LazyValue{}
2016 1 : }
2017 1 : return &i.key, i.value()
2018 : }
2019 :
2020 : // SeekPrefixGE implements internalIterator.SeekPrefixGE, as documented in the
2021 : // pebble package.
2022 : func (i *flushableBatchIter) SeekPrefixGE(
2023 : prefix, key []byte, flags base.SeekGEFlags,
2024 1 : ) (*base.InternalKey, base.LazyValue) {
2025 1 : return i.SeekGE(key, flags)
2026 1 : }
2027 :
2028 : // SeekLT implements internalIterator.SeekLT, as documented in the pebble
2029 : // package.
2030 : func (i *flushableBatchIter) SeekLT(
2031 : key []byte, flags base.SeekLTFlags,
2032 1 : ) (*InternalKey, base.LazyValue) {
2033 1 : i.err = nil // clear cached iteration error
2034 1 : ikey := base.MakeSearchKey(key)
2035 1 : i.index = sort.Search(len(i.offsets), func(j int) bool {
2036 1 : return base.InternalCompare(i.cmp, ikey, i.getKey(j)) <= 0
2037 1 : })
2038 1 : i.index--
2039 1 : if i.index < 0 {
2040 1 : return nil, base.LazyValue{}
2041 1 : }
2042 1 : i.key = i.getKey(i.index)
2043 1 : if i.lower != nil && i.cmp(i.key.UserKey, i.lower) < 0 {
2044 1 : i.index = -1
2045 1 : return nil, base.LazyValue{}
2046 1 : }
2047 1 : return &i.key, i.value()
2048 : }
2049 :
2050 : // First implements internalIterator.First, as documented in the pebble
2051 : // package.
2052 1 : func (i *flushableBatchIter) First() (*InternalKey, base.LazyValue) {
2053 1 : i.err = nil // clear cached iteration error
2054 1 : if len(i.offsets) == 0 {
2055 1 : return nil, base.LazyValue{}
2056 1 : }
2057 1 : i.index = 0
2058 1 : i.key = i.getKey(i.index)
2059 1 : if i.upper != nil && i.cmp(i.key.UserKey, i.upper) >= 0 {
2060 1 : i.index = len(i.offsets)
2061 1 : return nil, base.LazyValue{}
2062 1 : }
2063 1 : return &i.key, i.value()
2064 : }
2065 :
2066 : // Last implements internalIterator.Last, as documented in the pebble
2067 : // package.
2068 1 : func (i *flushableBatchIter) Last() (*InternalKey, base.LazyValue) {
2069 1 : i.err = nil // clear cached iteration error
2070 1 : if len(i.offsets) == 0 {
2071 0 : return nil, base.LazyValue{}
2072 0 : }
2073 1 : i.index = len(i.offsets) - 1
2074 1 : i.key = i.getKey(i.index)
2075 1 : if i.lower != nil && i.cmp(i.key.UserKey, i.lower) < 0 {
2076 1 : i.index = -1
2077 1 : return nil, base.LazyValue{}
2078 1 : }
2079 1 : return &i.key, i.value()
2080 : }
2081 :
2082 : // Note: flushFlushableBatchIter.Next mirrors the implementation of
2083 : // flushableBatchIter.Next due to performance. Keep the two in sync.
2084 1 : func (i *flushableBatchIter) Next() (*InternalKey, base.LazyValue) {
2085 1 : if i.index == len(i.offsets) {
2086 0 : return nil, base.LazyValue{}
2087 0 : }
2088 1 : i.index++
2089 1 : if i.index == len(i.offsets) {
2090 1 : return nil, base.LazyValue{}
2091 1 : }
2092 1 : i.key = i.getKey(i.index)
2093 1 : if i.upper != nil && i.cmp(i.key.UserKey, i.upper) >= 0 {
2094 1 : i.index = len(i.offsets)
2095 1 : return nil, base.LazyValue{}
2096 1 : }
2097 1 : return &i.key, i.value()
2098 : }
2099 :
2100 1 : func (i *flushableBatchIter) Prev() (*InternalKey, base.LazyValue) {
2101 1 : if i.index < 0 {
2102 0 : return nil, base.LazyValue{}
2103 0 : }
2104 1 : i.index--
2105 1 : if i.index < 0 {
2106 1 : return nil, base.LazyValue{}
2107 1 : }
2108 1 : i.key = i.getKey(i.index)
2109 1 : if i.lower != nil && i.cmp(i.key.UserKey, i.lower) < 0 {
2110 1 : i.index = -1
2111 1 : return nil, base.LazyValue{}
2112 1 : }
2113 1 : return &i.key, i.value()
2114 : }
2115 :
2116 : // Note: flushFlushableBatchIter.NextPrefix does not mirror this
2117 : // implementation; it is unimplemented and panics.
2118 0 : func (i *flushableBatchIter) NextPrefix(succKey []byte) (*InternalKey, LazyValue) {
2119 0 : return i.SeekGE(succKey, base.SeekGEFlagsNone.EnableTrySeekUsingNext())
2120 0 : }
2121 :
2122 1 : func (i *flushableBatchIter) getKey(index int) InternalKey {
2123 1 : e := &i.offsets[index]
2124 1 : kind := InternalKeyKind(i.data[e.offset])
2125 1 : key := i.data[e.keyStart:e.keyEnd]
2126 1 : return base.MakeInternalKey(key, i.batch.seqNum+uint64(e.index), kind)
2127 1 : }
2128 :
2129 1 : func (i *flushableBatchIter) value() base.LazyValue {
2130 1 : p := i.data[i.offsets[i.index].offset:]
2131 1 : if len(p) == 0 {
2132 0 : i.err = base.CorruptionErrorf("corrupted batch")
2133 0 : return base.LazyValue{}
2134 0 : }
2135 1 : kind := InternalKeyKind(p[0])
2136 1 : if kind > InternalKeyKindMax {
2137 0 : i.err = base.CorruptionErrorf("corrupted batch")
2138 0 : return base.LazyValue{}
2139 0 : }
2140 1 : var value []byte
2141 1 : var ok bool
2142 1 : switch kind {
2143 : case InternalKeyKindSet, InternalKeyKindMerge, InternalKeyKindRangeDelete,
2144 : InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete,
2145 1 : InternalKeyKindDeleteSized:
2146 1 : keyEnd := i.offsets[i.index].keyEnd
2147 1 : _, value, ok = batchDecodeStr(i.data[keyEnd:])
2148 1 : if !ok {
2149 0 : i.err = base.CorruptionErrorf("corrupted batch")
2150 0 : return base.LazyValue{}
2151 0 : }
2152 : }
2153 1 : return base.MakeInPlaceValue(value)
2154 : }
2155 :
2156 0 : func (i *flushableBatchIter) Valid() bool {
2157 0 : return i.index >= 0 && i.index < len(i.offsets)
2158 0 : }
2159 :
2160 1 : func (i *flushableBatchIter) Error() error {
2161 1 : return i.err
2162 1 : }
2163 :
2164 1 : func (i *flushableBatchIter) Close() error {
2165 1 : return i.err
2166 1 : }
2167 :
2168 1 : func (i *flushableBatchIter) SetBounds(lower, upper []byte) {
2169 1 : i.lower = lower
2170 1 : i.upper = upper
2171 1 : }
2172 :
2173 0 : func (i *flushableBatchIter) SetContext(_ context.Context) {}
2174 :
2175 : // flushFlushableBatchIter is similar to flushableBatchIter but it keeps track
2176 : // of number of bytes iterated.
2177 : type flushFlushableBatchIter struct {
2178 : flushableBatchIter
2179 : bytesIterated *uint64
2180 : }
2181 :
2182 : // flushFlushableBatchIter implements the base.InternalIterator interface.
2183 : var _ base.InternalIterator = (*flushFlushableBatchIter)(nil)
2184 :
2185 0 : func (i *flushFlushableBatchIter) String() string {
2186 0 : return "flushable-batch"
2187 0 : }
2188 :
2189 : func (i *flushFlushableBatchIter) SeekGE(
2190 : key []byte, flags base.SeekGEFlags,
2191 0 : ) (*InternalKey, base.LazyValue) {
2192 0 : panic("pebble: SeekGE unimplemented")
2193 : }
2194 :
2195 : func (i *flushFlushableBatchIter) SeekPrefixGE(
2196 : prefix, key []byte, flags base.SeekGEFlags,
2197 0 : ) (*base.InternalKey, base.LazyValue) {
2198 0 : panic("pebble: SeekPrefixGE unimplemented")
2199 : }
2200 :
2201 : func (i *flushFlushableBatchIter) SeekLT(
2202 : key []byte, flags base.SeekLTFlags,
2203 0 : ) (*InternalKey, base.LazyValue) {
2204 0 : panic("pebble: SeekLT unimplemented")
2205 : }
2206 :
2207 1 : func (i *flushFlushableBatchIter) First() (*InternalKey, base.LazyValue) {
2208 1 : i.err = nil // clear cached iteration error
2209 1 : key, val := i.flushableBatchIter.First()
2210 1 : if key == nil {
2211 0 : return nil, base.LazyValue{}
2212 0 : }
2213 1 : entryBytes := i.offsets[i.index].keyEnd - i.offsets[i.index].offset
2214 1 : *i.bytesIterated += uint64(entryBytes) + i.valueSize()
2215 1 : return key, val
2216 : }
2217 :
2218 0 : func (i *flushFlushableBatchIter) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) {
2219 0 : panic("pebble: NextPrefix unimplemented")
2220 : }
2221 :
2222 : // Note: flushFlushableBatchIter.Next mirrors the implementation of
2223 : // flushableBatchIter.Next due to performance. Keep the two in sync.
2224 1 : func (i *flushFlushableBatchIter) Next() (*InternalKey, base.LazyValue) {
2225 1 : if i.index == len(i.offsets) {
2226 0 : return nil, base.LazyValue{}
2227 0 : }
2228 1 : i.index++
2229 1 : if i.index == len(i.offsets) {
2230 1 : return nil, base.LazyValue{}
2231 1 : }
2232 1 : i.key = i.getKey(i.index)
2233 1 : entryBytes := i.offsets[i.index].keyEnd - i.offsets[i.index].offset
2234 1 : *i.bytesIterated += uint64(entryBytes) + i.valueSize()
2235 1 : return &i.key, i.value()
2236 : }
2237 :
2238 0 : func (i flushFlushableBatchIter) Prev() (*InternalKey, base.LazyValue) {
2239 0 : panic("pebble: Prev unimplemented")
2240 : }
2241 :
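     : // valueSize returns the encoded size of the current entry's value: the
     : // varint length prefix plus the value payload. Kinds other than Set, Merge,
     : // and RangeDelete contribute 0.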
2242 1 : func (i flushFlushableBatchIter) valueSize() uint64 {
2243 1 : p := i.data[i.offsets[i.index].offset:]
2244 1 : if len(p) == 0 {
2245 0 : i.err = base.CorruptionErrorf("corrupted batch")
2246 0 : return 0
2247 0 : }
2248 1 : kind := InternalKeyKind(p[0])
2249 1 : if kind > InternalKeyKindMax {
2250 0 : i.err = base.CorruptionErrorf("corrupted batch")
2251 0 : return 0
2252 0 : }
2253 1 : var length uint64
2254 1 : switch kind {
2255 1 : case InternalKeyKindSet, InternalKeyKindMerge, InternalKeyKindRangeDelete:
2256 1 : keyEnd := i.offsets[i.index].keyEnd
2257 1 : v, n := binary.Uvarint(i.data[keyEnd:])
2258 1 : if n <= 0 {
2259 0 : i.err = base.CorruptionErrorf("corrupted batch")
2260 0 : return 0
2261 0 : }
2262 1 : length = v + uint64(n)
2263 : }
2264 1 : return length
2265 : }
2266 :
2267 : // batchSort returns iterators for the sorted contents of the batch. It is
2268 : // intended for testing use only. The private.BatchSort dance is done to
2269 : // prevent exposing this method in the public pebble interface.
2270 : func batchSort(
2271 : i interface{},
2272 : ) (
2273 : points internalIterator,
2274 : rangeDels keyspan.FragmentIterator,
2275 : rangeKeys keyspan.FragmentIterator,
2276 1 : ) {
2277 1 : b := i.(*Batch)
2278 1 : if b.Indexed() {
2279 1 : pointIter := b.newInternalIter(nil)
2280 1 : rangeDelIter := b.newRangeDelIter(nil, math.MaxUint64)
2281 1 : rangeKeyIter := b.newRangeKeyIter(nil, math.MaxUint64)
2282 1 : return pointIter, rangeDelIter, rangeKeyIter
2283 1 : }
2284 1 : f := newFlushableBatch(b, b.db.opts.Comparer)
2285 1 : return f.newIter(nil), f.newRangeDelIter(nil), f.newRangeKeyIter(nil)
2286 : }
2287 :
2288 1 : func init() {
2289 1 : private.BatchSort = batchSort
2290 1 : }
|