Line data Source code
1 : // Copyright 2012 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package pebble
6 :
7 : import (
8 : "context"
9 : "encoding/binary"
10 : "fmt"
11 : "io"
12 : "math"
13 : "sort"
14 : "sync"
15 : "sync/atomic"
16 : "time"
17 : "unsafe"
18 :
19 : "github.com/cockroachdb/errors"
20 : "github.com/cockroachdb/pebble/batchrepr"
21 : "github.com/cockroachdb/pebble/internal/base"
22 : "github.com/cockroachdb/pebble/internal/batchskl"
23 : "github.com/cockroachdb/pebble/internal/humanize"
24 : "github.com/cockroachdb/pebble/internal/invariants"
25 : "github.com/cockroachdb/pebble/internal/keyspan"
26 : "github.com/cockroachdb/pebble/internal/private"
27 : "github.com/cockroachdb/pebble/internal/rangedel"
28 : "github.com/cockroachdb/pebble/internal/rangekey"
29 : "github.com/cockroachdb/pebble/internal/rawalloc"
30 : )
31 :
32 : const (
33 : batchInitialSize = 1 << 10 // 1 KB
34 : batchMaxRetainedSize = 1 << 20 // 1 MB
35 : invalidBatchCount = 1<<32 - 1
36 : maxVarintLen32 = 5
37 : )
38 :
39 : // ErrNotIndexed means that a read operation on a batch failed because the
40 : // batch is not indexed and thus doesn't support reads.
41 : var ErrNotIndexed = errors.New("pebble: batch not indexed")
42 :
43 : // ErrInvalidBatch indicates that a batch is invalid or otherwise corrupted.
44 : var ErrInvalidBatch = batchrepr.ErrInvalidBatch
45 :
46 : // ErrBatchTooLarge indicates that a batch is invalid or otherwise corrupted.
47 : var ErrBatchTooLarge = base.MarkCorruptionError(errors.Newf("pebble: batch too large: >= %s", humanize.Bytes.Uint64(maxBatchSize)))
48 :
49 : // DeferredBatchOp represents a batch operation (e.g. set, merge, delete) that is
50 : // being inserted into the batch. Indexing is not performed on the specified key
51 : // until Finish is called, hence the name deferred. This struct lets the caller
52 : // copy or encode keys/values directly into the batch representation instead of
53 : // copying into an intermediary buffer then having pebble.Batch copy off of it.
54 : type DeferredBatchOp struct {
55 : index *batchskl.Skiplist
56 :
57 : // Key and Value point to parts of the binary batch representation where
58 : // keys and values should be encoded/copied into. len(Key) and len(Value)
59 : // bytes must be copied into these slices respectively before calling
60 : // Finish(). Changing where these slices point to is not allowed.
61 : Key, Value []byte
62 : offset uint32
63 : }
64 :
65 : // Finish completes the addition of this batch operation, and adds it to the
66 : // index if necessary. Must be called once (and exactly once) after keys/values
67 : // have been filled into Key and Value. Not calling Finish or not
68 : // copying/encoding keys will result in an incomplete index, and calling Finish
69 : // twice may result in a panic.
70 1 : func (d DeferredBatchOp) Finish() error {
71 1 : if d.index != nil {
72 0 : if err := d.index.Add(d.offset); err != nil {
73 0 : return err
74 0 : }
75 : }
76 1 : return nil
77 : }
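// A minimal sketch (not from the source) of the deferred-op flow described
// above, assuming an existing *Batch b and caller-provided slices k and v; the
// caller encodes directly into the reserved slices and then calls Finish:
//
//	op := b.SetDeferred(len(k), len(v)) // reserves space in the batch repr
//	copy(op.Key, k)                     // encode the key in place
//	copy(op.Value, v)                   // encode the value in place
//	if err := op.Finish(); err != nil { // indexes the entry, if the batch is indexed
//		return err
//	}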
78 :
79 : // A Batch is a sequence of Sets, Merges, Deletes, DeleteRanges, RangeKeySets,
80 : // RangeKeyUnsets, and/or RangeKeyDeletes that are applied atomically. Batch
81 : // implements the Reader interface, but only an indexed batch supports reading
82 : // (without error) via Get or NewIter. A non-indexed batch will return
83 : // ErrNotIndexed when read from. A batch is not safe for concurrent use, and
84 : // consumers should use a batch per goroutine or provide their own
85 : // synchronization.
86 : //
87 : // # Indexing
88 : //
89 : // Batches can be optionally indexed (see DB.NewIndexedBatch). An indexed batch
90 : // allows iteration via an Iterator (see Batch.NewIter). The iterator provides
91 : // a merged view of the operations in the batch and the underlying
92 : // database. This is implemented by treating the batch as an additional layer
93 : // in the LSM where every entry in the batch is considered newer than any entry
94 : // in the underlying database (batch entries have the InternalKeySeqNumBatch
95 : // bit set). By treating the batch as an additional layer in the LSM, iteration
96 : // supports all batch operations (i.e. Set, Merge, Delete, DeleteRange,
97 : // RangeKeySet, RangeKeyUnset, RangeKeyDelete) with minimal effort.
98 : //
99 : // The same key can be operated on multiple times in a batch, though only the
100 : // latest operation will be visible. For example, Put("a", "b"), Delete("a")
101 : // will cause the key "a" to not be visible in the batch. Put("a", "b"),
102 : // Put("a", "c") will cause a read of "a" to return the value "c".
103 : //
104 : // The batch index is implemented via a skiplist (internal/batchskl). While
105 : // the skiplist implementation is very fast, inserting into an indexed batch is
106 : // significantly slower than inserting into a non-indexed batch. Only use an
107 : // indexed batch if you require reading from it.
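// As a short, hedged sketch of the indexed-batch read path (assuming an open
// *DB named db):
//
//	b := db.NewIndexedBatch()
//	_ = b.Set([]byte("a"), []byte("b"), nil)
//	_ = b.Set([]byte("a"), []byte("c"), nil) // the later Set wins
//	v, closer, err := b.Get([]byte("a"))     // v == []byte("c")
//	if err == nil {
//		_ = v
//		_ = closer.Close()
//	}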
108 : //
109 : // # Atomic commit
110 : //
111 : // The operations in a batch are persisted by calling Batch.Commit which is
112 : // equivalent to calling DB.Apply(batch). A batch is committed atomically by
113 : // writing the internal batch representation to the WAL, adding all of the
114 : // batch operations to the memtable associated with the WAL, and then
115 : // incrementing the visible sequence number so that subsequent reads can see
116 : // the effects of the batch operations. If WriteOptions.Sync is true, a call to
117 : // Batch.Commit will guarantee that the batch is persisted to disk before
118 : // returning. See commitPipeline for more on the implementation details.
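// A hedged sketch of the commit path (assuming an open *DB named db); Sync is
// the package-level WriteOptions value requesting a WAL fsync:
//
//	b := db.NewBatch()
//	_ = b.Set([]byte("k"), []byte("v"), nil)
//	if err := b.Commit(pebble.Sync); err != nil {
//		// the batch was not durably committed
//	}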
119 : //
120 : // # Large batches
121 : //
122 : // The size of a batch is limited only by available memory (be aware that
123 : // indexed batches require considerable additional memory for the skiplist
124 : // structure). A given WAL file has a single memtable associated with it (this
125 : // restriction could be removed, but doing so is onerous and complex). And a
126 : // memtable has a fixed size due to the underlying fixed size arena. Note that
127 : // this differs from RocksDB where a memtable can grow arbitrarily large using
128 : // a list of arena chunks. In RocksDB this is accomplished by storing pointers
129 : // in the arena memory, but that isn't possible in Go.
130 : //
131 : // During Batch.Commit, a batch which is larger than a threshold (>
132 : // MemTableSize/2) is wrapped in a flushableBatch and inserted into the queue
133 : // of memtables. A flushableBatch forces the WAL to be rotated, but that happens
134 : // anyway when the memtable becomes full, so this does not cause significant
135 : // WAL churn. Because the flushableBatch is readable as another layer in the
136 : // LSM, Batch.Commit returns as soon as the flushableBatch has been added to
137 : // the queue of memtables.
138 : //
139 : // Internally, a flushableBatch provides Iterator support by sorting the batch
140 : // contents (the batch is sorted once, when it is added to the memtable
141 : // queue). Sorting the batch contents and insertion of the contents into a
142 : // memtable have the same big-O time, but the constant factor dominates
143 : // here. Sorting is significantly faster and uses significantly less memory.
144 : //
145 : // # Internal representation
146 : //
147 : // The internal batch representation is a contiguous byte buffer with a fixed
148 : // 12-byte header, followed by a series of records.
149 : //
150 : // +-------------+------------+--- ... ---+
151 : // | SeqNum (8B) | Count (4B) | Entries |
152 : // +-------------+------------+--- ... ---+
153 : //
154 : // Each record has a 1-byte kind tag prefix, followed by 1 or 2 length prefixed
155 : // strings (varstring):
156 : //
157 : // +-----------+-----------------+-------------------+
158 : // | Kind (1B) | Key (varstring) | Value (varstring) |
159 : // +-----------+-----------------+-------------------+
160 : //
161 : // A varstring is a varint32 followed by N bytes of data. The Kind tags are
162 : // exactly those specified by InternalKeyKind. The following table shows the
163 : // format for records of each kind:
164 : //
165 : // InternalKeyKindDelete varstring
166 : // InternalKeyKindLogData varstring
167 : // InternalKeyKindIngestSST varstring
168 : // InternalKeyKindSet varstring varstring
169 : // InternalKeyKindMerge varstring varstring
170 : // InternalKeyKindRangeDelete varstring varstring
171 : // InternalKeyKindRangeKeySet varstring varstring
172 : // InternalKeyKindRangeKeyUnset varstring varstring
173 : // InternalKeyKindRangeKeyDelete varstring varstring
174 : //
175 : // The intuitive understanding here is that the arguments to Delete, Set,
176 : // Merge, DeleteRange and RangeKeyDelete are encoded into the batch. The
177 : // RangeKeySet and RangeKeyUnset operations are slightly more complicated,
178 : // encoding their end key, suffix and value [in the case of RangeKeySet] within
179 : // the Value varstring. For more information on the value encoding for
180 : // RangeKeySet and RangeKeyUnset, see the internal/rangekey package.
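// As an illustration only (not an API), the 12-byte header above can be
// decoded with encoding/binary, since SeqNum and Count are stored
// little-endian; in practice batchrepr.ReadHeader performs this decoding:
//
//	seqNum := binary.LittleEndian.Uint64(repr[:8])
//	count := binary.LittleEndian.Uint32(repr[8:12])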
181 : //
182 : // The internal batch representation is the on disk format for a batch in the
183 : // WAL, and thus stable. New record kinds may be added, but the existing ones
184 : // will not be modified.
185 : type Batch struct {
186 : batchInternal
187 : applied atomic.Bool
188 : // lifecycle is used to negotiate the lifecycle of a Batch. A Batch and its
189 : // underlying batchInternal.data byte slice may be reused. There are two
190 : // mechanisms for reuse:
191 : //
192 : // 1. The caller may explicitly call [Batch.Reset] to reset the batch to be
193 : // empty (while retaining the underlying repr's buffer).
194 : // 2. The caller may call [Batch.Close], passing ownership off to Pebble,
195 : // which may reuse the batch's memory to service new callers to
196 : // [DB.NewBatch].
197 : //
198 : // There's a complication to reuse: When WAL failover is configured, the
199 : // Pebble commit pipeline may retain a pointer to the batch.data beyond the
200 : // return of [Batch.Commit]. The user of the Batch may commit their batch
201 : // and call Close or Reset before the commit pipeline is finished reading
202 : // the data slice. Recycling immediately would cause a data race.
203 : //
204 : // To resolve this data race, this [lifecycle] atomic is used to determine
205 : // safety and responsibility of reusing a batch. The low bits of the atomic
206 : // are used as a reference count (really just the lowest bit; in practice
207 : // only one code path takes a reference). The [Batch.refData] func is
208 : // passed into [wal.Writer]'s WriteRecord method. The wal.Writer guarantees
209 : // that if it will read [Batch.data] after the call to WriteRecord returns,
210 : // it will increment the reference count. When it's complete, it'll
211 : // unreference through invoking [Batch.unrefData].
212 : //
213 : // When the committer of a batch indicates intent to recycle a Batch through
214 : // calling [Batch.Reset] or [Batch.Close], the lifecycle atomic is read. If
215 : // an outstanding reference remains, it's unsafe to reuse Batch.data yet. In
216 : // [Batch.Reset] the caller wants to reuse the [Batch] immediately, so we
217 : // discard b.data to recycle the struct but not the underlying byte slice.
218 : // In [Batch.Close], we set a special high bit [batchClosedBit] on lifecycle
219 : // that indicates that the user will not use [Batch] again and we're free to
220 : // recycle it when safe. When the commit pipeline eventually calls
221 : // [Batch.unrefData], the [batchClosedBit] is noticed and the batch is
222 : // recycled.
223 : lifecycle atomic.Int32
224 : }
225 :
226 : // batchClosedBit is a bit stored on Batch.lifecycle to indicate that the user
227 : // called [Batch.Close] to release a Batch, but an open reference count
228 : // prevented immediate recycling.
229 : const batchClosedBit = 1 << 30
230 :
231 : // refData is passed to (wal.Writer).WriteRecord. If the WAL writer may need to
232 : // read b.data after it returns, it invokes refData to increment the lifecycle's
233 : // reference count. When it's finished, it invokes the returned function
234 : // [Batch.unrefData].
235 1 : func (b *Batch) refData() (unref func()) {
236 1 : b.lifecycle.Add(+1)
237 1 : return b.unrefData
238 1 : }
239 :
240 1 : func (b *Batch) unrefData() {
241 1 : if v := b.lifecycle.Add(-1); (v ^ batchClosedBit) == 0 {
242 1 : // The [batchClosedBit] high bit is set, and there are no outstanding
243 1 : // references. The user of the Batch called [Batch.Close], expecting the
244 1 : // batch to be recycled. However, our outstanding reference count
245 1 : // prevented recycling. As the last to dereference, we're now
246 1 : // responsible for releasing the batch.
247 1 : b.lifecycle.Store(0)
248 1 : b.release()
249 1 : }
250 : }
251 :
252 : // batchInternal contains the set of fields within Batch that are non-atomic and
253 : // capable of being reset using a *b = batchInternal{} struct copy.
254 : type batchInternal struct {
255 : // Data is the wire format of a batch's log entry:
256 : // - 8 bytes for a sequence number of the first batch element,
257 : // or zeroes if the batch has not yet been applied,
258 : // - 4 bytes for the count: the number of elements in the batch,
259 : // or "\xff\xff\xff\xff" if the batch is invalid,
260 : // - count elements, being:
261 : // - one byte for the kind
262 : // - the varint-string user key,
263 : // - the varint-string value (if kind != delete).
264 : // The sequence number and count are stored in little-endian order.
265 : //
266 : // The data field can be (but is not guaranteed to be) nil for new
267 : // batches. Large batches will set the data field to nil when committed as
268 : // the data has been moved to a flushableBatch and inserted into the queue of
269 : // memtables.
270 : data []byte
271 : cmp Compare
272 : formatKey base.FormatKey
273 : abbreviatedKey AbbreviatedKey
274 :
275 : // An upper bound on required space to add this batch to a memtable.
276 : // Note that although batches are limited to 4 GiB in size, that limit
277 : // applies to len(data), not the memtable size. The upper bound on the
278 : // size of a memtable node is larger than the overhead of the batch's log
279 : // encoding, so memTableSize is larger than len(data) and may overflow a
280 : // uint32.
281 : memTableSize uint64
282 :
283 : // The db to which the batch will be committed. Do not change this field
284 : // after the batch has been created as it might invalidate internal state.
285 : // Batch.memTableSize is only refreshed if Batch.db is set. Setting db to
286 : // nil once it has been set implies that the Batch has encountered an error.
287 : db *DB
288 :
289 : // The count of records in the batch. This count will be stored in the batch
290 : // data whenever Repr() is called.
291 : count uint64
292 :
293 : // The count of range deletions in the batch. Updated every time a range
294 : // deletion is added.
295 : countRangeDels uint64
296 :
297 : // The count of range key sets, unsets and deletes in the batch. Updated
298 : // every time a RANGEKEYSET, RANGEKEYUNSET or RANGEKEYDEL key is added.
299 : countRangeKeys uint64
300 :
301 : // A deferredOp struct, stored in the Batch so that a pointer can be returned
302 : // from the *Deferred() methods rather than a value.
303 : deferredOp DeferredBatchOp
304 :
305 : // An optional skiplist keyed by offset into data of the entry.
306 : index *batchskl.Skiplist
307 : rangeDelIndex *batchskl.Skiplist
308 : rangeKeyIndex *batchskl.Skiplist
309 :
310 : // Fragmented range deletion tombstones. Cached the first time a range
311 : // deletion iterator is requested. The cache is invalidated whenever a new
312 : // range deletion is added to the batch. This cache can only be used when
313 : // opening an iterator to read at a batch sequence number >=
314 : // tombstonesSeqNum. This is the case for all new iterators created over a
315 : // batch but it's not the case for all cloned iterators.
316 : tombstones []keyspan.Span
317 : tombstonesSeqNum uint64
318 :
319 : // Fragmented range key spans. Cached the first time a range key iterator is
320 : // requested. The cache is invalidated whenever a new range key
321 : // (RangeKey{Set,Unset,Del}) is added to the batch. This cache can only be
322 : // used when opening an iterator to read at a batch sequence number >=
323 : // rangeKeysSeqNum. This is the case for all new iterators created over a
324 : // batch but it's not the case for all cloned iterators.
325 : rangeKeys []keyspan.Span
326 : rangeKeysSeqNum uint64
327 :
328 : // The flushableBatch wrapper if the batch is too large to fit in the
329 : // memtable.
330 : flushable *flushableBatch
331 :
332 : // minimumFormatMajorVersion indicates the format major version required in
333 : // order to commit this batch. If an operation requires a particular format
334 : // major version, it ratchets the batch's minimumFormatMajorVersion. When
335 : // the batch is committed, this is validated against the database's current
336 : // format major version.
337 : minimumFormatMajorVersion FormatMajorVersion
338 :
339 : // Synchronous Apply uses the commit WaitGroup for both publishing the
340 : // seqnum and waiting for the WAL fsync (if needed). Asynchronous
341 : // ApplyNoSyncWait, which implies WriteOptions.Sync is true, uses the commit
342 : // WaitGroup for publishing the seqnum and the fsyncWait WaitGroup for
343 : // waiting for the WAL fsync.
344 : //
345 : // TODO(sumeer): if we find that ApplyNoSyncWait in conjunction with
346 : // SyncWait is causing higher memory usage because of the time duration
347 : // between when the sync is already done, and a goroutine calls SyncWait
348 : // (followed by Batch.Close), we could separate out {fsyncWait, commitErr}
349 : // into a separate struct that is allocated separately (using another
350 : // sync.Pool), and only that struct needs to outlive Batch.Close (which
351 : // could then be called immediately after ApplyNoSyncWait). commitStats
352 : // will also need to be in this separate struct.
353 : commit sync.WaitGroup
354 : fsyncWait sync.WaitGroup
355 :
356 : commitStats BatchCommitStats
357 :
358 : commitErr error
359 :
360 : // Position bools together to reduce the sizeof the struct.
361 :
362 : // ingestedSSTBatch indicates that the batch contains one or more key kinds
363 : // of InternalKeyKindIngestSST. If the batch contains key kinds of IngestSST
364 : // then it will only contain key kinds of IngestSST.
365 : ingestedSSTBatch bool
366 :
367 : // committing is set to true when a batch begins to commit. It's used to
368 : // ensure the batch is not mutated concurrently. It is deliberately not an
369 : // atomic, so as to avoid the overhead on batch mutations. This is
370 : // okay, because under correct usage this field will never be accessed
371 : // concurrently. It's only under incorrect usage the memory accesses of this
372 : // variable may violate memory safety. Since we don't use atomics here,
373 : // false negatives are possible.
374 : committing bool
375 : }
376 :
377 : // BatchCommitStats exposes stats related to committing a batch.
378 : //
379 : // NB: there is no Pebble internal tracing (using LoggerAndTracer) of slow
380 : // batch commits. The caller can use these stats to do their own tracing as
381 : // needed.
382 : type BatchCommitStats struct {
383 : // TotalDuration is the time spent in DB.{Apply,ApplyNoSyncWait} or
384 : // Batch.Commit, plus the time waiting in Batch.SyncWait. If there is a gap
385 : // between calling ApplyNoSyncWait and calling SyncWait, that gap could
386 : // include some duration in which real work was being done for the commit
387 : // and will not be included here. This missing time is considered acceptable
388 : // since the goal of these stats is to understand user-facing latency.
389 : //
390 : // TotalDuration includes time spent in various queues both inside Pebble
391 : // and outside Pebble (I/O queues, goroutine scheduler queue, mutex wait
392 : // etc.). For some of these queues (which we consider important) the wait
393 : // times are included below -- these expose low-level implementation detail
394 : // and are meant for expert diagnosis and subject to change. There may be
395 : // unaccounted time after subtracting those values from TotalDuration.
396 : TotalDuration time.Duration
397 : // SemaphoreWaitDuration is the wait time for semaphores in
398 : // commitPipeline.Commit.
399 : SemaphoreWaitDuration time.Duration
400 : // WALQueueWaitDuration is the wait time for allocating memory blocks in the
401 : // LogWriter (due to the LogWriter not writing fast enough). At the moment
402 : // this duration is always zero because a single WAL will allow
403 : // allocating memory blocks up to the entire memtable size. In the future,
404 : // we may pipeline WALs and bound the WAL queued blocks separately, so this
405 : // field is preserved for that possibility.
406 : WALQueueWaitDuration time.Duration
407 : // MemTableWriteStallDuration is the wait caused by a write stall due to too
408 : // many memtables (due to not flushing fast enough).
409 : MemTableWriteStallDuration time.Duration
410 : // L0ReadAmpWriteStallDuration is the wait caused by a write stall due to
411 : // high read amplification in L0 (due to not compacting fast enough out of
412 : // L0).
413 : L0ReadAmpWriteStallDuration time.Duration
414 : // WALRotationDuration is the wait time for WAL rotation, which includes
415 : // syncing and closing the old WAL and creating (or reusing) a new one.
416 : WALRotationDuration time.Duration
417 : // CommitWaitDuration is the wait for publishing the seqnum plus the
418 : // duration for the WAL sync (if requested). The former should be tiny and
419 : // one can assume that this is all due to the WAL sync.
420 : CommitWaitDuration time.Duration
421 : }
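// A hedged usage sketch: callers that want to trace slow commits can read
// these stats after committing. The Batch.CommitStats accessor is assumed
// here.
//
//	if err := b.Commit(pebble.Sync); err == nil {
//		stats := b.CommitStats()
//		if stats.TotalDuration > 100*time.Millisecond {
//			// record a trace event for the slow commit
//		}
//	}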
422 :
423 : var _ Reader = (*Batch)(nil)
424 : var _ Writer = (*Batch)(nil)
425 :
426 : var batchPool = sync.Pool{
427 1 : New: func() interface{} {
428 1 : return &Batch{}
429 1 : },
430 : }
431 :
432 : type indexedBatch struct {
433 : batch Batch
434 : index batchskl.Skiplist
435 : }
436 :
437 : var indexedBatchPool = sync.Pool{
438 1 : New: func() interface{} {
439 1 : return &indexedBatch{}
440 1 : },
441 : }
442 :
443 1 : func newBatch(db *DB) *Batch {
444 1 : b := batchPool.Get().(*Batch)
445 1 : b.db = db
446 1 : return b
447 1 : }
448 :
449 1 : func newBatchWithSize(db *DB, size int) *Batch {
450 1 : b := newBatch(db)
451 1 : if cap(b.data) < size {
452 1 : b.data = rawalloc.New(0, size)
453 1 : }
454 1 : return b
455 : }
456 :
457 1 : func newIndexedBatch(db *DB, comparer *Comparer) *Batch {
458 1 : i := indexedBatchPool.Get().(*indexedBatch)
459 1 : i.batch.cmp = comparer.Compare
460 1 : i.batch.formatKey = comparer.FormatKey
461 1 : i.batch.abbreviatedKey = comparer.AbbreviatedKey
462 1 : i.batch.db = db
463 1 : i.batch.index = &i.index
464 1 : i.batch.index.Init(&i.batch.data, i.batch.cmp, i.batch.abbreviatedKey)
465 1 : return &i.batch
466 1 : }
467 :
468 0 : func newIndexedBatchWithSize(db *DB, comparer *Comparer, size int) *Batch {
469 0 : b := newIndexedBatch(db, comparer)
470 0 : if cap(b.data) < size {
471 0 : b.data = rawalloc.New(0, size)
472 0 : }
473 0 : return b
474 : }
475 :
476 : // nextSeqNum returns the batch "sequence number" that will be given to the next
477 : // key written to the batch. During iteration keys within an indexed batch are
478 : // given a sequence number consisting of their offset within the batch combined
479 : // with the base.InternalKeySeqNumBatch bit. These sequence numbers are only
480 : // used during iteration, and the keys are assigned ordinary sequence numbers
481 : // when the batch is committed.
482 1 : func (b *Batch) nextSeqNum() uint64 {
483 1 : return uint64(len(b.data)) | base.InternalKeySeqNumBatch
484 1 : }
485 :
486 1 : func (b *Batch) release() {
487 1 : if b.db == nil {
488 1 : // The batch was not created using newBatch or newIndexedBatch, or an error
489 1 : // was encountered. We don't try to reuse batches that encountered an error
490 1 : // because they might be stuck somewhere in the system and attempting to
491 1 : // reuse such batches is a recipe for onerous debugging sessions. Instead,
492 1 : // let the GC do its job.
493 1 : return
494 1 : }
495 1 : b.db = nil
496 1 :
497 1 : // NB: This is ugly (it would be cleaner if we could just assign a Batch{}),
498 1 : // but necessary so that we can use atomic.StoreUint32 for the Batch.applied
499 1 : // field. Without using an atomic to clear that field the Go race detector
500 1 : // complains.
501 1 : b.reset()
502 1 : b.cmp = nil
503 1 : b.formatKey = nil
504 1 : b.abbreviatedKey = nil
505 1 :
506 1 : if b.index == nil {
507 1 : batchPool.Put(b)
508 1 : } else {
509 1 : b.index, b.rangeDelIndex, b.rangeKeyIndex = nil, nil, nil
510 1 : indexedBatchPool.Put((*indexedBatch)(unsafe.Pointer(b)))
511 1 : }
512 : }
513 :
514 1 : func (b *Batch) refreshMemTableSize() error {
515 1 : b.memTableSize = 0
516 1 : if len(b.data) < batchrepr.HeaderLen {
517 1 : return nil
518 1 : }
519 :
520 1 : b.countRangeDels = 0
521 1 : b.countRangeKeys = 0
522 1 : b.minimumFormatMajorVersion = 0
523 1 : for r := b.Reader(); ; {
524 1 : kind, key, value, ok, err := r.Next()
525 1 : if !ok {
526 1 : if err != nil {
527 0 : return err
528 0 : }
529 1 : break
530 : }
531 1 : switch kind {
532 1 : case InternalKeyKindRangeDelete:
533 1 : b.countRangeDels++
534 1 : case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
535 1 : b.countRangeKeys++
536 1 : case InternalKeyKindSet, InternalKeyKindDelete, InternalKeyKindMerge, InternalKeyKindSingleDelete, InternalKeyKindSetWithDelete:
537 : // fallthrough
538 1 : case InternalKeyKindDeleteSized:
539 1 : if b.minimumFormatMajorVersion < FormatDeleteSizedAndObsolete {
540 1 : b.minimumFormatMajorVersion = FormatDeleteSizedAndObsolete
541 1 : }
542 1 : case InternalKeyKindLogData:
543 1 : // LogData does not contribute to memtable size.
544 1 : continue
545 1 : case InternalKeyKindIngestSST:
546 1 : if b.minimumFormatMajorVersion < FormatFlushableIngest {
547 1 : b.minimumFormatMajorVersion = FormatFlushableIngest
548 1 : }
549 : // This key kind doesn't contribute to the memtable size.
550 1 : continue
551 0 : default:
552 0 : // Note In some circumstances this might be temporary memory
553 0 : // corruption that can be recovered by discarding the batch and
554 0 : // trying again. In other cases, the batch repr might've been
555 0 : // already persisted elsewhere, and we'll loop continuously trying
556 0 : // to commit the same corrupted batch. The caller is responsible for
557 0 : // distinguishing.
558 0 : return errors.Wrapf(ErrInvalidBatch, "unrecognized kind %v", kind)
559 : }
560 1 : b.memTableSize += memTableEntrySize(len(key), len(value))
561 : }
562 1 : return nil
563 : }
564 :
565 : // Apply the operations contained in the batch to the receiver batch.
566 : //
567 : // It is safe to modify the contents of the arguments after Apply returns.
568 : //
569 : // Apply returns ErrInvalidBatch if the provided batch is invalid in any way.
570 1 : func (b *Batch) Apply(batch *Batch, _ *WriteOptions) error {
571 1 : if b.ingestedSSTBatch {
572 0 : panic("pebble: invalid batch application")
573 : }
574 1 : if len(batch.data) == 0 {
575 0 : return nil
576 0 : }
577 1 : if len(batch.data) < batchrepr.HeaderLen {
578 0 : return ErrInvalidBatch
579 0 : }
580 :
581 1 : offset := len(b.data)
582 1 : if offset == 0 {
583 1 : b.init(offset)
584 1 : offset = batchrepr.HeaderLen
585 1 : }
586 1 : b.data = append(b.data, batch.data[batchrepr.HeaderLen:]...)
587 1 :
588 1 : b.setCount(b.Count() + batch.Count())
589 1 :
590 1 : if b.db != nil || b.index != nil {
591 1 : // Only iterate over the new entries if we need to track memTableSize or in
592 1 : // order to update the index.
593 1 : for iter := batchrepr.Reader(b.data[offset:]); len(iter) > 0; {
594 1 : offset := uintptr(unsafe.Pointer(&iter[0])) - uintptr(unsafe.Pointer(&b.data[0]))
595 1 : kind, key, value, ok, err := iter.Next()
596 1 : if !ok {
597 0 : if err != nil {
598 0 : return err
599 0 : }
600 0 : break
601 : }
602 1 : switch kind {
603 1 : case InternalKeyKindRangeDelete:
604 1 : b.countRangeDels++
605 1 : case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
606 1 : b.countRangeKeys++
607 0 : case InternalKeyKindIngestSST:
608 0 : panic("pebble: invalid key kind for batch")
609 1 : case InternalKeyKindLogData:
610 1 : // LogData does not contribute to memtable size.
611 1 : continue
612 : case InternalKeyKindSet, InternalKeyKindDelete, InternalKeyKindMerge,
613 1 : InternalKeyKindSingleDelete, InternalKeyKindSetWithDelete, InternalKeyKindDeleteSized:
614 : // fallthrough
615 0 : default:
616 0 : // Note In some circumstances this might be temporary memory
617 0 : // corruption that can be recovered by discarding the batch and
618 0 : // trying again. In other cases, the batch repr might've been
619 0 : // already persisted elsewhere, and we'll loop continuously
620 0 : // trying to commit the same corrupted batch. The caller is
621 0 : // responsible for distinguishing.
622 0 : return errors.Wrapf(ErrInvalidBatch, "unrecognized kind %v", kind)
623 : }
624 1 : if b.index != nil {
625 1 : var err error
626 1 : switch kind {
627 1 : case InternalKeyKindRangeDelete:
628 1 : b.tombstones = nil
629 1 : b.tombstonesSeqNum = 0
630 1 : if b.rangeDelIndex == nil {
631 1 : b.rangeDelIndex = batchskl.NewSkiplist(&b.data, b.cmp, b.abbreviatedKey)
632 1 : }
633 1 : err = b.rangeDelIndex.Add(uint32(offset))
634 1 : case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
635 1 : b.rangeKeys = nil
636 1 : b.rangeKeysSeqNum = 0
637 1 : if b.rangeKeyIndex == nil {
638 1 : b.rangeKeyIndex = batchskl.NewSkiplist(&b.data, b.cmp, b.abbreviatedKey)
639 1 : }
640 1 : err = b.rangeKeyIndex.Add(uint32(offset))
641 1 : default:
642 1 : err = b.index.Add(uint32(offset))
643 : }
644 1 : if err != nil {
645 0 : return err
646 0 : }
647 : }
648 1 : b.memTableSize += memTableEntrySize(len(key), len(value))
649 : }
650 : }
651 1 : return nil
652 : }
653 :
654 : // Get gets the value for the given key. It returns ErrNotFound if the Batch
655 : // does not contain the key.
656 : //
657 : // The caller should not modify the contents of the returned slice, but it is
658 : // safe to modify the contents of the argument after Get returns. The returned
659 : // slice will remain valid until the returned Closer is closed. On success, the
660 : // caller MUST call closer.Close() or a memory leak will occur.
661 1 : func (b *Batch) Get(key []byte) ([]byte, io.Closer, error) {
662 1 : if b.index == nil {
663 0 : return nil, nil, ErrNotIndexed
664 0 : }
665 1 : return b.db.getInternal(key, b, nil /* snapshot */)
666 : }
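// A short sketch of the Get contract above: the returned slice is only valid
// until the Closer is closed, so copy it if it must outlive the call.
//
//	v, closer, err := b.Get(key)
//	if err != nil {
//		return err // ErrNotFound when the key is absent
//	}
//	out := append([]byte(nil), v...) // copy before closing
//	_ = closer.Close()
//	_ = out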
667 :
668 1 : func (b *Batch) prepareDeferredKeyValueRecord(keyLen, valueLen int, kind InternalKeyKind) {
669 1 : if b.committing {
670 0 : panic("pebble: batch already committing")
671 : }
672 1 : if len(b.data) == 0 {
673 1 : b.init(keyLen + valueLen + 2*binary.MaxVarintLen64 + batchrepr.HeaderLen)
674 1 : }
675 1 : b.count++
676 1 : b.memTableSize += memTableEntrySize(keyLen, valueLen)
677 1 :
678 1 : pos := len(b.data)
679 1 : b.deferredOp.offset = uint32(pos)
680 1 : b.grow(1 + 2*maxVarintLen32 + keyLen + valueLen)
681 1 : b.data[pos] = byte(kind)
682 1 : pos++
683 1 :
684 1 : {
685 1 : // TODO(peter): Manually inlined version binary.PutUvarint(). This is 20%
686 1 : // faster on BenchmarkBatchSet on go1.13. Remove if go1.14 or future
687 1 : // versions show this to not be a performance win.
688 1 : x := uint32(keyLen)
689 1 : for x >= 0x80 {
690 1 : b.data[pos] = byte(x) | 0x80
691 1 : x >>= 7
692 1 : pos++
693 1 : }
694 1 : b.data[pos] = byte(x)
695 1 : pos++
696 : }
697 :
698 1 : b.deferredOp.Key = b.data[pos : pos+keyLen]
699 1 : pos += keyLen
700 1 :
701 1 : {
702 1 : // TODO(peter): Manually inlined version binary.PutUvarint(). This is 20%
703 1 : // faster on BenchmarkBatchSet on go1.13. Remove if go1.14 or future
704 1 : // versions show this to not be a performance win.
705 1 : x := uint32(valueLen)
706 1 : for x >= 0x80 {
707 1 : b.data[pos] = byte(x) | 0x80
708 1 : x >>= 7
709 1 : pos++
710 1 : }
711 1 : b.data[pos] = byte(x)
712 1 : pos++
713 : }
714 :
715 1 : b.deferredOp.Value = b.data[pos : pos+valueLen]
716 1 : // Shrink data since varints may be shorter than the upper bound.
717 1 : b.data = b.data[:pos+valueLen]
718 : }
719 :
720 1 : func (b *Batch) prepareDeferredKeyRecord(keyLen int, kind InternalKeyKind) {
721 1 : if b.committing {
722 0 : panic("pebble: batch already committing")
723 : }
724 1 : if len(b.data) == 0 {
725 1 : b.init(keyLen + binary.MaxVarintLen64 + batchrepr.HeaderLen)
726 1 : }
727 1 : b.count++
728 1 : b.memTableSize += memTableEntrySize(keyLen, 0)
729 1 :
730 1 : pos := len(b.data)
731 1 : b.deferredOp.offset = uint32(pos)
732 1 : b.grow(1 + maxVarintLen32 + keyLen)
733 1 : b.data[pos] = byte(kind)
734 1 : pos++
735 1 :
736 1 : {
737 1 : // TODO(peter): Manually inlined version binary.PutUvarint(). Remove if
738 1 : // go1.13 or future versions show this to not be a performance win. See
739 1 : // BenchmarkBatchSet.
740 1 : x := uint32(keyLen)
741 1 : for x >= 0x80 {
742 0 : b.data[pos] = byte(x) | 0x80
743 0 : x >>= 7
744 0 : pos++
745 0 : }
746 1 : b.data[pos] = byte(x)
747 1 : pos++
748 : }
749 :
750 1 : b.deferredOp.Key = b.data[pos : pos+keyLen]
751 1 : b.deferredOp.Value = nil
752 1 :
753 1 : // Shrink data since varint may be shorter than the upper bound.
754 1 : b.data = b.data[:pos+keyLen]
755 : }
756 :
757 : // AddInternalKey allows the caller to add an internal key of point key or range
758 : // key kinds (but not RangeDelete) to a batch. Passing in an internal key of
759 : // kind RangeDelete will result in a panic. Note that the seqnum in the internal
760 : // key is effectively ignored, even though the Kind is preserved. This is
761 : // because the batch format does not allow for a per-key seqnum to be specified,
762 : // only a batch-wide one.
763 : //
764 : // Note that non-indexed keys (InternalKeyKind{LogData,IngestSST}) are not
765 : // supported with this method as they require specialized logic.
766 1 : func (b *Batch) AddInternalKey(key *base.InternalKey, value []byte, _ *WriteOptions) error {
767 1 : keyLen := len(key.UserKey)
768 1 : hasValue := false
769 1 : switch kind := key.Kind(); kind {
770 0 : case InternalKeyKindRangeDelete:
771 0 : panic("unexpected range delete in AddInternalKey")
772 1 : case InternalKeyKindSingleDelete, InternalKeyKindDelete:
773 1 : b.prepareDeferredKeyRecord(keyLen, kind)
774 1 : b.deferredOp.index = b.index
775 1 : case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
776 1 : b.prepareDeferredKeyValueRecord(keyLen, len(value), kind)
777 1 : hasValue = true
778 1 : b.incrementRangeKeysCount()
779 1 : default:
780 1 : b.prepareDeferredKeyValueRecord(keyLen, len(value), kind)
781 1 : hasValue = true
782 1 : b.deferredOp.index = b.index
783 : }
784 1 : copy(b.deferredOp.Key, key.UserKey)
785 1 : if hasValue {
786 1 : copy(b.deferredOp.Value, value)
787 1 : }
788 :
789 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
790 : // in go1.13 will remove the need for this.
791 1 : if b.index != nil {
792 0 : if err := b.index.Add(b.deferredOp.offset); err != nil {
793 0 : return err
794 0 : }
795 : }
796 1 : return nil
797 : }
798 :
799 : // Set adds an action to the batch that sets the key to map to the value.
800 : //
801 : // It is safe to modify the contents of the arguments after Set returns.
802 1 : func (b *Batch) Set(key, value []byte, _ *WriteOptions) error {
803 1 : deferredOp := b.SetDeferred(len(key), len(value))
804 1 : copy(deferredOp.Key, key)
805 1 : copy(deferredOp.Value, value)
806 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
807 1 : // in go1.13 will remove the need for this.
808 1 : if b.index != nil {
809 1 : if err := b.index.Add(deferredOp.offset); err != nil {
810 0 : return err
811 0 : }
812 : }
813 1 : return nil
814 : }
815 :
816 : // SetDeferred is similar to Set in that it adds a set operation to the batch,
817 : // except it only takes in key/value lengths instead of complete slices,
818 : // letting the caller encode into those objects and then call Finish() on the
819 : // returned object.
820 1 : func (b *Batch) SetDeferred(keyLen, valueLen int) *DeferredBatchOp {
821 1 : b.prepareDeferredKeyValueRecord(keyLen, valueLen, InternalKeyKindSet)
822 1 : b.deferredOp.index = b.index
823 1 : return &b.deferredOp
824 1 : }
825 :
826 : // Merge adds an action to the batch that merges the value at key with the new
827 : // value. The details of the merge are dependent upon the configured merge
828 : // operator.
829 : //
830 : // It is safe to modify the contents of the arguments after Merge returns.
831 1 : func (b *Batch) Merge(key, value []byte, _ *WriteOptions) error {
832 1 : deferredOp := b.MergeDeferred(len(key), len(value))
833 1 : copy(deferredOp.Key, key)
834 1 : copy(deferredOp.Value, value)
835 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
836 1 : // in go1.13 will remove the need for this.
837 1 : if b.index != nil {
838 1 : if err := b.index.Add(deferredOp.offset); err != nil {
839 0 : return err
840 0 : }
841 : }
842 1 : return nil
843 : }
844 :
845 : // MergeDeferred is similar to Merge in that it adds a merge operation to the
846 : // batch, except it only takes in key/value lengths instead of complete slices,
847 : // letting the caller encode into those objects and then call Finish() on the
848 : // returned object.
849 1 : func (b *Batch) MergeDeferred(keyLen, valueLen int) *DeferredBatchOp {
850 1 : b.prepareDeferredKeyValueRecord(keyLen, valueLen, InternalKeyKindMerge)
851 1 : b.deferredOp.index = b.index
852 1 : return &b.deferredOp
853 1 : }
854 :
855 : // Delete adds an action to the batch that deletes the entry for key.
856 : //
857 : // It is safe to modify the contents of the arguments after Delete returns.
858 1 : func (b *Batch) Delete(key []byte, _ *WriteOptions) error {
859 1 : deferredOp := b.DeleteDeferred(len(key))
860 1 : copy(deferredOp.Key, key)
861 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
862 1 : // in go1.13 will remove the need for this.
863 1 : if b.index != nil {
864 1 : if err := b.index.Add(deferredOp.offset); err != nil {
865 0 : return err
866 0 : }
867 : }
868 1 : return nil
869 : }
870 :
871 : // DeleteDeferred is similar to Delete in that it adds a delete operation to
872 : // the batch, except it only takes in key/value lengths instead of complete
873 : // slices, letting the caller encode into those objects and then call Finish()
874 : // on the returned object.
875 1 : func (b *Batch) DeleteDeferred(keyLen int) *DeferredBatchOp {
876 1 : b.prepareDeferredKeyRecord(keyLen, InternalKeyKindDelete)
877 1 : b.deferredOp.index = b.index
878 1 : return &b.deferredOp
879 1 : }
880 :
881 : // DeleteSized behaves identically to Delete, but takes an additional
882 : // argument indicating the size of the value being deleted. DeleteSized
883 : // should be preferred when the caller has the expectation that there exists
884 : // a single internal KV pair for the key (eg, the key has not been
885 : // overwritten recently), and the caller knows the size of its value.
886 : //
887 : // DeleteSized will record the value size within the tombstone and use it to
888 : // inform compaction-picking heuristics which strive to reduce space
889 : // amplification in the LSM. This "calling your shot" mechanic allows the
890 : // storage engine to more accurately estimate and reduce space amplification.
891 : //
892 : // It is safe to modify the contents of the arguments after DeleteSized
893 : // returns.
894 1 : func (b *Batch) DeleteSized(key []byte, deletedValueSize uint32, _ *WriteOptions) error {
895 1 : deferredOp := b.DeleteSizedDeferred(len(key), deletedValueSize)
896 1 : copy(b.deferredOp.Key, key)
897 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Check if in a
898 1 : // later Go release this is unnecessary.
899 1 : if b.index != nil {
900 1 : if err := b.index.Add(deferredOp.offset); err != nil {
901 0 : return err
902 0 : }
903 : }
904 1 : return nil
905 : }
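// A hedged usage sketch: the caller passes its estimate of the size of the
// value currently stored under key (approxValueSize is hypothetical here):
//
//	if err := b.DeleteSized(key, uint32(approxValueSize), nil); err != nil {
//		return err
//	}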
906 :
907 : // DeleteSizedDeferred is similar to DeleteSized in that it adds a sized delete
908 : // operation to the batch, except it only takes in key length instead of a
909 : // complete key slice, letting the caller encode into the DeferredBatchOp.Key
910 : // slice and then call Finish() on the returned object.
911 1 : func (b *Batch) DeleteSizedDeferred(keyLen int, deletedValueSize uint32) *DeferredBatchOp {
912 1 : if b.minimumFormatMajorVersion < FormatDeleteSizedAndObsolete {
913 1 : b.minimumFormatMajorVersion = FormatDeleteSizedAndObsolete
914 1 : }
915 :
916 : // Encode the sum of the key length and the value in the value.
917 1 : v := uint64(deletedValueSize) + uint64(keyLen)
918 1 :
919 1 : // Encode `v` as a varint.
920 1 : var buf [binary.MaxVarintLen64]byte
921 1 : n := 0
922 1 : {
923 1 : x := v
924 1 : for x >= 0x80 {
925 1 : buf[n] = byte(x) | 0x80
926 1 : x >>= 7
927 1 : n++
928 1 : }
929 1 : buf[n] = byte(x)
930 1 : n++
931 : }
932 :
933 : // NB: In batch entries and sstable entries, values are stored as
934 : // varstrings. Here, the value is itself a simple varint. This results in an
935 : // unnecessary double layer of encoding:
936 : // varint(n) varint(deletedValueSize)
937 : // The first varint will always be 1-byte, since a varint-encoded uint64
938 : // never exceeds 10 bytes (well under 128). This unnecessary extra byte and wrapping is
939 : // preserved to avoid special casing across the database, and in particular
940 : // in sstable block decoding which is performance sensitive.
941 1 : b.prepareDeferredKeyValueRecord(keyLen, n, InternalKeyKindDeleteSized)
942 1 : b.deferredOp.index = b.index
943 1 : copy(b.deferredOp.Value, buf[:n])
944 1 : return &b.deferredOp
945 : }
946 :
947 : // SingleDelete adds an action to the batch that single deletes the entry for key.
948 : // See Writer.SingleDelete for more details on the semantics of SingleDelete.
949 : //
950 : // It is safe to modify the contents of the arguments after SingleDelete returns.
951 1 : func (b *Batch) SingleDelete(key []byte, _ *WriteOptions) error {
952 1 : deferredOp := b.SingleDeleteDeferred(len(key))
953 1 : copy(deferredOp.Key, key)
954 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
955 1 : // in go1.13 will remove the need for this.
956 1 : if b.index != nil {
957 1 : if err := b.index.Add(deferredOp.offset); err != nil {
958 0 : return err
959 0 : }
960 : }
961 1 : return nil
962 : }
963 :
964 : // SingleDeleteDeferred is similar to SingleDelete in that it adds a single delete
965 : // operation to the batch, except it only takes in key/value lengths instead of
966 : // complete slices, letting the caller encode into those objects and then call
967 : // Finish() on the returned object.
968 1 : func (b *Batch) SingleDeleteDeferred(keyLen int) *DeferredBatchOp {
969 1 : b.prepareDeferredKeyRecord(keyLen, InternalKeyKindSingleDelete)
970 1 : b.deferredOp.index = b.index
971 1 : return &b.deferredOp
972 1 : }
973 :
974 : // DeleteRange deletes all of the point keys (and values) in the range
975 : // [start,end) (inclusive on start, exclusive on end). DeleteRange does NOT
976 : // delete overlapping range keys (eg, keys set via RangeKeySet).
977 : //
978 : // It is safe to modify the contents of the arguments after DeleteRange
979 : // returns.
980 1 : func (b *Batch) DeleteRange(start, end []byte, _ *WriteOptions) error {
981 1 : deferredOp := b.DeleteRangeDeferred(len(start), len(end))
982 1 : copy(deferredOp.Key, start)
983 1 : copy(deferredOp.Value, end)
984 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
985 1 : // in go1.13 will remove the need for this.
986 1 : if deferredOp.index != nil {
987 1 : if err := deferredOp.index.Add(deferredOp.offset); err != nil {
988 0 : return err
989 0 : }
990 : }
991 1 : return nil
992 : }
993 :
994 : // DeleteRangeDeferred is similar to DeleteRange in that it adds a delete range
995 : // operation to the batch, except it only takes in key lengths instead of
996 : // complete slices, letting the caller encode into those objects and then call
997 : // Finish() on the returned object. Note that DeferredBatchOp.Key should be
998 : // populated with the start key, and DeferredBatchOp.Value should be populated
999 : // with the end key.
1000 1 : func (b *Batch) DeleteRangeDeferred(startLen, endLen int) *DeferredBatchOp {
1001 1 : b.prepareDeferredKeyValueRecord(startLen, endLen, InternalKeyKindRangeDelete)
1002 1 : b.countRangeDels++
1003 1 : if b.index != nil {
1004 1 : b.tombstones = nil
1005 1 : b.tombstonesSeqNum = 0
1006 1 : // Range deletions are rare, so we lazily allocate the index for them.
1007 1 : if b.rangeDelIndex == nil {
1008 1 : b.rangeDelIndex = batchskl.NewSkiplist(&b.data, b.cmp, b.abbreviatedKey)
1009 1 : }
1010 1 : b.deferredOp.index = b.rangeDelIndex
1011 : }
1012 1 : return &b.deferredOp
1013 : }
1014 :
1015 : // RangeKeySet sets a range key mapping the key range [start, end) at the MVCC
1016 : // timestamp suffix to value. The suffix is optional. If any portion of the key
1017 : // range [start, end) is already set by a range key with the same suffix value,
1018 : // RangeKeySet overrides it.
1019 : //
1020 : // It is safe to modify the contents of the arguments after RangeKeySet returns.
1021 1 : func (b *Batch) RangeKeySet(start, end, suffix, value []byte, _ *WriteOptions) error {
1022 1 : if invariants.Enabled && b.db != nil {
1023 1 : // RangeKeySet is only supported on prefix keys.
1024 1 : if b.db.opts.Comparer.Split(start) != len(start) {
1025 0 : panic("RangeKeySet called with suffixed start key")
1026 : }
1027 1 : if b.db.opts.Comparer.Split(end) != len(end) {
1028 0 : panic("RangeKeySet called with suffixed end key")
1029 : }
1030 : }
1031 1 : suffixValues := [1]rangekey.SuffixValue{{Suffix: suffix, Value: value}}
1032 1 : internalValueLen := rangekey.EncodedSetValueLen(end, suffixValues[:])
1033 1 :
1034 1 : deferredOp := b.rangeKeySetDeferred(len(start), internalValueLen)
1035 1 : copy(deferredOp.Key, start)
1036 1 : n := rangekey.EncodeSetValue(deferredOp.Value, end, suffixValues[:])
1037 1 : if n != internalValueLen {
1038 0 : panic("unexpected internal value length mismatch")
1039 : }
1040 :
1041 : // Manually inline DeferredBatchOp.Finish().
1042 1 : if deferredOp.index != nil {
1043 1 : if err := deferredOp.index.Add(deferredOp.offset); err != nil {
1044 0 : return err
1045 0 : }
1046 : }
1047 1 : return nil
1048 : }
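// A small, hedged sketch of the range-key write API; suffix is a
// caller-provided MVCC timestamp suffix (nil for an unsuffixed range key):
//
//	_ = b.RangeKeySet([]byte("a"), []byte("z"), suffix, []byte("val"), nil)
//	_ = b.RangeKeyUnset([]byte("a"), []byte("m"), suffix, nil)
//	_ = b.RangeKeyDelete([]byte("m"), []byte("z"), nil)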
1049 :
1050 1 : func (b *Batch) rangeKeySetDeferred(startLen, internalValueLen int) *DeferredBatchOp {
1051 1 : b.prepareDeferredKeyValueRecord(startLen, internalValueLen, InternalKeyKindRangeKeySet)
1052 1 : b.incrementRangeKeysCount()
1053 1 : return &b.deferredOp
1054 1 : }
1055 :
1056 1 : func (b *Batch) incrementRangeKeysCount() {
1057 1 : b.countRangeKeys++
1058 1 : if b.index != nil {
1059 1 : b.rangeKeys = nil
1060 1 : b.rangeKeysSeqNum = 0
1061 1 : // Range keys are rare, so we lazily allocate the index for them.
1062 1 : if b.rangeKeyIndex == nil {
1063 1 : b.rangeKeyIndex = batchskl.NewSkiplist(&b.data, b.cmp, b.abbreviatedKey)
1064 1 : }
1065 1 : b.deferredOp.index = b.rangeKeyIndex
1066 : }
1067 : }
1068 :
1069 : // RangeKeyUnset removes a range key mapping the key range [start, end) at the
1070 : // MVCC timestamp suffix. The suffix may be omitted to remove an unsuffixed
1071 : // range key. RangeKeyUnset only removes portions of range keys that fall within
1072 : // the [start, end) key span, and only range keys with suffixes that exactly
1073 : // match the unset suffix.
1074 : //
1075 : // It is safe to modify the contents of the arguments after RangeKeyUnset
1076 : // returns.
1077 1 : func (b *Batch) RangeKeyUnset(start, end, suffix []byte, _ *WriteOptions) error {
1078 1 : if invariants.Enabled && b.db != nil {
1079 1 : // RangeKeyUnset is only supported on prefix keys.
1080 1 : if b.db.opts.Comparer.Split(start) != len(start) {
1081 0 : panic("RangeKeyUnset called with suffixed start key")
1082 : }
1083 1 : if b.db.opts.Comparer.Split(end) != len(end) {
1084 0 : panic("RangeKeyUnset called with suffixed end key")
1085 : }
1086 : }
1087 1 : suffixes := [1][]byte{suffix}
1088 1 : internalValueLen := rangekey.EncodedUnsetValueLen(end, suffixes[:])
1089 1 :
1090 1 : deferredOp := b.rangeKeyUnsetDeferred(len(start), internalValueLen)
1091 1 : copy(deferredOp.Key, start)
1092 1 : n := rangekey.EncodeUnsetValue(deferredOp.Value, end, suffixes[:])
1093 1 : if n != internalValueLen {
1094 0 : panic("unexpected internal value length mismatch")
1095 : }
1096 :
1097 : // Manually inline DeferredBatchOp.Finish()
1098 1 : if deferredOp.index != nil {
1099 1 : if err := deferredOp.index.Add(deferredOp.offset); err != nil {
1100 0 : return err
1101 0 : }
1102 : }
1103 1 : return nil
1104 : }
1105 :
1106 1 : func (b *Batch) rangeKeyUnsetDeferred(startLen, internalValueLen int) *DeferredBatchOp {
1107 1 : b.prepareDeferredKeyValueRecord(startLen, internalValueLen, InternalKeyKindRangeKeyUnset)
1108 1 : b.incrementRangeKeysCount()
1109 1 : return &b.deferredOp
1110 1 : }
1111 :
1112 : // RangeKeyDelete deletes all of the range keys in the range [start,end)
1113 : // (inclusive on start, exclusive on end). It does not delete point keys (for
1114 : // that use DeleteRange). RangeKeyDelete removes all range keys within the
1115 : // bounds, including those with or without suffixes.
1116 : //
1117 : // It is safe to modify the contents of the arguments after RangeKeyDelete
1118 : // returns.
1119 1 : func (b *Batch) RangeKeyDelete(start, end []byte, _ *WriteOptions) error {
1120 1 : if invariants.Enabled && b.db != nil {
1121 1 : // RangeKeyDelete is only supported on prefix keys.
1122 1 : if b.db.opts.Comparer.Split(start) != len(start) {
1123 0 : panic("RangeKeyDelete called with suffixed start key")
1124 : }
1125 1 : if b.db.opts.Comparer.Split(end) != len(end) {
1126 0 : panic("RangeKeyDelete called with suffixed end key")
1127 : }
1128 : }
1129 1 : deferredOp := b.RangeKeyDeleteDeferred(len(start), len(end))
1130 1 : copy(deferredOp.Key, start)
1131 1 : copy(deferredOp.Value, end)
1132 1 : // Manually inline DeferredBatchOp.Finish().
1133 1 : if deferredOp.index != nil {
1134 1 : if err := deferredOp.index.Add(deferredOp.offset); err != nil {
1135 0 : return err
1136 0 : }
1137 : }
1138 1 : return nil
1139 : }
1140 :
1141 : // RangeKeyDeleteDeferred is similar to RangeKeyDelete in that it adds an
1142 : // operation to delete range keys to the batch, except it only takes in key
1143 : // lengths instead of complete slices, letting the caller encode into those
1144 : // objects and then call Finish() on the returned object. Note that
1145 : // DeferredBatchOp.Key should be populated with the start key, and
1146 : // DeferredBatchOp.Value should be populated with the end key.
1147 1 : func (b *Batch) RangeKeyDeleteDeferred(startLen, endLen int) *DeferredBatchOp {
1148 1 : b.prepareDeferredKeyValueRecord(startLen, endLen, InternalKeyKindRangeKeyDelete)
1149 1 : b.incrementRangeKeysCount()
1150 1 : return &b.deferredOp
1151 1 : }
1152 :
1153 : // LogData adds the specified data to the batch. The data will be written to the
1154 : // WAL, but not added to memtables or sstables. Log data is never indexed,
1155 : // which makes it useful for testing WAL performance.
1156 : //
1157 : // It is safe to modify the contents of the argument after LogData returns.
1158 1 : func (b *Batch) LogData(data []byte, _ *WriteOptions) error {
1159 1 : origCount, origMemTableSize := b.count, b.memTableSize
1160 1 : b.prepareDeferredKeyRecord(len(data), InternalKeyKindLogData)
1161 1 : copy(b.deferredOp.Key, data)
1162 1 : // Since LogData only writes to the WAL and does not affect the memtable, we
1163 1 : // restore b.count and b.memTableSize to their origin values. Note that
1164 1 : // Batch.count only refers to records that are added to the memtable.
1165 1 : b.count, b.memTableSize = origCount, origMemTableSize
1166 1 : return nil
1167 1 : }
1168 :
1169 : // ingestSST adds the FileNum for an sstable to the batch. The data will only be
1170 : // written to the WAL (not added to memtables or sstables).
1171 1 : func (b *Batch) ingestSST(fileNum base.FileNum) {
1172 1 : if b.Empty() {
1173 1 : b.ingestedSSTBatch = true
1174 1 : } else if !b.ingestedSSTBatch {
1175 0 : // Batch contains other key kinds.
1176 0 : panic("pebble: invalid call to ingestSST")
1177 : }
1178 :
1179 1 : origMemTableSize := b.memTableSize
1180 1 : var buf [binary.MaxVarintLen64]byte
1181 1 : length := binary.PutUvarint(buf[:], uint64(fileNum))
1182 1 : b.prepareDeferredKeyRecord(length, InternalKeyKindIngestSST)
1183 1 : copy(b.deferredOp.Key, buf[:length])
1184 1 : // Since IngestSST writes only to the WAL and does not affect the memtable,
1185 1 : // we restore b.memTableSize to its original value. Note that Batch.count
1186 1 : // is not reset because for the InternalKeyKindIngestSST the count is the
1187 1 : // number of sstable paths which have been added to the batch.
1188 1 : b.memTableSize = origMemTableSize
1189 1 : b.minimumFormatMajorVersion = FormatFlushableIngest
1190 : }
1191 :
1192 : // Empty returns true if the batch is empty, and false otherwise.
1193 1 : func (b *Batch) Empty() bool {
1194 1 : return batchrepr.IsEmpty(b.data)
1195 1 : }
1196 :
1197 : // Len returns the current size of the batch in bytes.
1198 1 : func (b *Batch) Len() int {
1199 1 : return max(batchrepr.HeaderLen, len(b.data))
1200 1 : }
1201 :
1202 : // Repr returns the underlying batch representation. It is not safe to modify
1203 : // the contents. Reset() will not change the contents of the returned value,
1204 : // though any other mutation operation may do so.
1205 1 : func (b *Batch) Repr() []byte {
1206 1 : if len(b.data) == 0 {
1207 1 : b.init(batchrepr.HeaderLen)
1208 1 : }
1209 1 : batchrepr.SetCount(b.data, b.Count())
1210 1 : return b.data
1211 : }
1212 :
1213 : // SetRepr sets the underlying batch representation. The batch takes ownership
1214 : // of the supplied slice. It is not safe to modify it afterwards until the
1215 : // Batch is no longer in use.
1216 : //
1217 : // SetRepr may return ErrInvalidBatch if the supplied slice fails to decode in
1218 : // any way. It will not return an error in any other circumstance.
1219 1 : func (b *Batch) SetRepr(data []byte) error {
1220 1 : h, ok := batchrepr.ReadHeader(data)
1221 1 : if !ok {
1222 0 : return ErrInvalidBatch
1223 0 : }
1224 1 : b.data = data
1225 1 : b.count = uint64(h.Count)
1226 1 : var err error
1227 1 : if b.db != nil {
1228 1 : // Only track memTableSize for batches that will be committed to the DB.
1229 1 : err = b.refreshMemTableSize()
1230 1 : }
1231 1 : return err
1232 : }
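// A hedged round-trip sketch: Repr serializes the batch in its stable wire
// format and SetRepr rehydrates it into a fresh batch, which can then be
// committed (or applied to another batch via Apply):
//
//	data := b.Repr()
//	b2 := db.NewBatch()
//	if err := b2.SetRepr(data); err != nil {
//		return err
//	}
//	if err := b2.Commit(pebble.Sync); err != nil {
//		return err
//	}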
1233 :
1234 : // NewIter returns an iterator that is unpositioned (Iterator.Valid() will
1235 : // return false). The iterator can be positioned via a call to SeekGE,
1236 : // SeekPrefixGE, SeekLT, First or Last. Only indexed batches support iterators.
1237 : //
1238 : // The returned Iterator observes all of the Batch's existing mutations, but no
1239 : // later mutations. Its view can be refreshed via RefreshBatchSnapshot or
1240 : // SetOptions().
1241 1 : func (b *Batch) NewIter(o *IterOptions) (*Iterator, error) {
1242 1 : return b.NewIterWithContext(context.Background(), o)
1243 1 : }
1244 :
1245 : // NewIterWithContext is like NewIter, and additionally accepts a context for
1246 : // tracing.
1247 1 : func (b *Batch) NewIterWithContext(ctx context.Context, o *IterOptions) (*Iterator, error) {
1248 1 : if b.index == nil {
1249 0 : return nil, ErrNotIndexed
1250 0 : }
1251 1 : return b.db.newIter(ctx, b, newIterOpts{}, o), nil
1252 : }
1253 :
1254 : // NewBatchOnlyIter constructs an iterator that only reads the contents of the
1255 : // batch, and does not overlay the batch mutations on top of the DB state.
1256 : //
1257 : // The returned Iterator observes all of the Batch's existing mutations, but
1258 : // no later mutations. Its view can be refreshed via RefreshBatchSnapshot or
1259 : // SetOptions().
1260 1 : func (b *Batch) NewBatchOnlyIter(ctx context.Context, o *IterOptions) (*Iterator, error) {
1261 1 : if b.index == nil {
1262 0 : return nil, ErrNotIndexed
1263 0 : }
1264 1 : return b.db.newIter(ctx, b, newIterOpts{batch: batchIterOpts{batchOnly: true}}, o), nil
1265 : }
1266 :
1267 : // newInternalIter creates a new internalIterator that iterates over the
1268 : // contents of the batch.
1269 1 : func (b *Batch) newInternalIter(o *IterOptions) *batchIter {
1270 1 : iter := &batchIter{}
1271 1 : b.initInternalIter(o, iter)
1272 1 : return iter
1273 1 : }
1274 :
1275 1 : func (b *Batch) initInternalIter(o *IterOptions, iter *batchIter) {
1276 1 : *iter = batchIter{
1277 1 : cmp: b.cmp,
1278 1 : batch: b,
1279 1 : iter: b.index.NewIter(o.GetLowerBound(), o.GetUpperBound()),
1280 1 : // NB: We explicitly do not propagate the batch snapshot to the point
1281 1 : // key iterator. Filtering point keys within the batch iterator can
1282 1 : // cause pathological behavior where a batch iterator advances
1283           1 : 		// significantly farther than necessary, filtering many batch keys that
1284 1 : // are not visible at the batch sequence number. Instead, the merging
1285 1 : // iterator enforces bounds.
1286 1 : //
1287 1 : // For example, consider an engine that contains the committed keys
1288 1 : // 'bar' and 'bax', with no keys between them. Consider a batch
1289           1 : 		// containing 1,000 keys within the range [a,z]. All of the
1290 1 : // batch keys were added to the batch after the iterator was
1291 1 : // constructed, so they are not visible to the iterator. A call to
1292 1 : // SeekGE('bax') would seek the LSM iterators and discover the key
1293 1 : // 'bax'. It would also seek the batch iterator, landing on the key
1294           1 : 		// 'baz' and discover that it's not visible. The batch iterator would
1295 1 : // next through the rest of the batch's keys, only to discover there are
1296 1 : // no visible keys greater than or equal to 'bax'.
1297 1 : //
1298 1 : // Filtering these batch points within the merging iterator ensures that
1299 1 : // the batch iterator never needs to iterate beyond 'baz', because it
1300 1 : // already found a smaller, visible key 'bax'.
1301 1 : snapshot: base.InternalKeySeqNumMax,
1302 1 : }
1303 1 : }
1304 :
1305 1 : func (b *Batch) newRangeDelIter(o *IterOptions, batchSnapshot uint64) *keyspan.Iter {
1306           1 : 	// Construct an iterator even if rangeDelIndex is nil, because the iterator
1307           1 : 	// may be refreshed later, so we need the container to exist.
1308 1 : iter := new(keyspan.Iter)
1309 1 : b.initRangeDelIter(o, iter, batchSnapshot)
1310 1 : return iter
1311 1 : }
1312 :
1313 1 : func (b *Batch) initRangeDelIter(_ *IterOptions, iter *keyspan.Iter, batchSnapshot uint64) {
1314 1 : if b.rangeDelIndex == nil {
1315 1 : iter.Init(b.cmp, nil)
1316 1 : return
1317 1 : }
1318 :
1319 : // Fragment the range tombstones the first time a range deletion iterator is
1320 : // requested. The cached tombstones are invalidated if another range
1321 : // deletion tombstone is added to the batch. This cache is only guaranteed
1322 : // to be correct if we're opening an iterator to read at a batch sequence
1323 : // number at least as high as tombstonesSeqNum. The cache is guaranteed to
1324 : // include all tombstones up to tombstonesSeqNum, and if any additional
1325 : // tombstones were added after that sequence number the cache would've been
1326 : // cleared.
1327 1 : nextSeqNum := b.nextSeqNum()
1328 1 : if b.tombstones != nil && b.tombstonesSeqNum <= batchSnapshot {
1329 1 : iter.Init(b.cmp, b.tombstones)
1330 1 : return
1331 1 : }
1332 :
1333 1 : tombstones := make([]keyspan.Span, 0, b.countRangeDels)
1334 1 : frag := &keyspan.Fragmenter{
1335 1 : Cmp: b.cmp,
1336 1 : Format: b.formatKey,
1337 1 : Emit: func(s keyspan.Span) {
1338 1 : tombstones = append(tombstones, s)
1339 1 : },
1340 : }
1341 1 : it := &batchIter{
1342 1 : cmp: b.cmp,
1343 1 : batch: b,
1344 1 : iter: b.rangeDelIndex.NewIter(nil, nil),
1345 1 : snapshot: batchSnapshot,
1346 1 : }
1347 1 : fragmentRangeDels(frag, it, int(b.countRangeDels))
1348 1 : iter.Init(b.cmp, tombstones)
1349 1 :
1350 1 : // If we just read all the tombstones in the batch (eg, batchSnapshot was
1351 1 : // set to b.nextSeqNum()), then cache the tombstones so that a subsequent
1352 1 : // call to initRangeDelIter may use them without refragmenting.
1353 1 : if nextSeqNum == batchSnapshot {
1354 1 : b.tombstones = tombstones
1355 1 : b.tombstonesSeqNum = nextSeqNum
1356 1 : }
1357 : }
1358 :
1359 1 : func fragmentRangeDels(frag *keyspan.Fragmenter, it internalIterator, count int) {
1360 1 : // The memory management here is a bit subtle. The keys and values returned
1361 1 : // by the iterator are slices in Batch.data. Thus the fragmented tombstones
1362 1 : // are slices within Batch.data. If additional entries are added to the
1363 1 : // Batch, Batch.data may be reallocated. The references in the fragmented
1364 1 : // tombstones will remain valid, pointing into the old Batch.data. GC for
1365 1 : // the win.
1366 1 :
1367 1 : // Use a single []keyspan.Key buffer to avoid allocating many
1368 1 : // individual []keyspan.Key slices with a single element each.
1369 1 : keyBuf := make([]keyspan.Key, 0, count)
1370 1 : for key, val := it.First(); key != nil; key, val = it.Next() {
1371 1 : s := rangedel.Decode(*key, val.InPlaceValue(), keyBuf)
1372 1 : keyBuf = s.Keys[len(s.Keys):]
1373 1 :
1374 1 : // Set a fixed capacity to avoid accidental overwriting.
1375 1 : s.Keys = s.Keys[:len(s.Keys):len(s.Keys)]
1376 1 : frag.Add(s)
1377 1 : }
1378 1 : frag.Finish()
1379 : }
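// As a minimal sketch of what fragmentation produces (an editor's addition,
// not part of the original source), feeding two overlapping tombstones to a
// keyspan.Fragmenter in start-key order:
//
//	frag.Add(rangedel.Decode(base.MakeInternalKey([]byte("a"), 2, InternalKeyKindRangeDelete), []byte("m"), nil))
//	frag.Add(rangedel.Decode(base.MakeInternalKey([]byte("c"), 1, InternalKeyKindRangeDelete), []byte("z"), nil))
//	frag.Finish()
//
// emits the non-overlapping spans [a,c) carrying seqnum 2, [c,m) carrying
// seqnums 2 and 1, and [m,z) carrying seqnum 1.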
1380 :
1381 1 : func (b *Batch) newRangeKeyIter(o *IterOptions, batchSnapshot uint64) *keyspan.Iter {
1382           1 : 	// Construct an iterator even if rangeKeyIndex is nil, because the iterator
1383           1 : 	// may be refreshed later, so we need the container to exist.
1384 1 : iter := new(keyspan.Iter)
1385 1 : b.initRangeKeyIter(o, iter, batchSnapshot)
1386 1 : return iter
1387 1 : }
1388 :
1389 1 : func (b *Batch) initRangeKeyIter(_ *IterOptions, iter *keyspan.Iter, batchSnapshot uint64) {
1390 1 : if b.rangeKeyIndex == nil {
1391 1 : iter.Init(b.cmp, nil)
1392 1 : return
1393 1 : }
1394 :
1395 : // Fragment the range keys the first time a range key iterator is requested.
1396 : // The cached spans are invalidated if another range key is added to the
1397 : // batch. This cache is only guaranteed to be correct if we're opening an
1398 : // iterator to read at a batch sequence number at least as high as
1399 : // rangeKeysSeqNum. The cache is guaranteed to include all range keys up to
1400 : // rangeKeysSeqNum, and if any additional range keys were added after that
1401 : // sequence number the cache would've been cleared.
1402 1 : nextSeqNum := b.nextSeqNum()
1403 1 : if b.rangeKeys != nil && b.rangeKeysSeqNum <= batchSnapshot {
1404 1 : iter.Init(b.cmp, b.rangeKeys)
1405 1 : return
1406 1 : }
1407 :
1408 1 : rangeKeys := make([]keyspan.Span, 0, b.countRangeKeys)
1409 1 : frag := &keyspan.Fragmenter{
1410 1 : Cmp: b.cmp,
1411 1 : Format: b.formatKey,
1412 1 : Emit: func(s keyspan.Span) {
1413 1 : rangeKeys = append(rangeKeys, s)
1414 1 : },
1415 : }
1416 1 : it := &batchIter{
1417 1 : cmp: b.cmp,
1418 1 : batch: b,
1419 1 : iter: b.rangeKeyIndex.NewIter(nil, nil),
1420 1 : snapshot: batchSnapshot,
1421 1 : }
1422 1 : fragmentRangeKeys(frag, it, int(b.countRangeKeys))
1423 1 : iter.Init(b.cmp, rangeKeys)
1424 1 :
1425 1 : // If we just read all the range keys in the batch (eg, batchSnapshot was
1426 1 : // set to b.nextSeqNum()), then cache the range keys so that a subsequent
1427 1 : // call to initRangeKeyIter may use them without refragmenting.
1428 1 : if nextSeqNum == batchSnapshot {
1429 1 : b.rangeKeys = rangeKeys
1430 1 : b.rangeKeysSeqNum = nextSeqNum
1431 1 : }
1432 : }
1433 :
1434 1 : func fragmentRangeKeys(frag *keyspan.Fragmenter, it internalIterator, count int) error {
1435 1 : // The memory management here is a bit subtle. The keys and values
1436 1 : // returned by the iterator are slices in Batch.data. Thus the
1437 1 : // fragmented key spans are slices within Batch.data. If additional
1438 1 : // entries are added to the Batch, Batch.data may be reallocated. The
1439 1 : // references in the fragmented keys will remain valid, pointing into
1440 1 : // the old Batch.data. GC for the win.
1441 1 :
1442 1 : // Use a single []keyspan.Key buffer to avoid allocating many
1443 1 : // individual []keyspan.Key slices with a single element each.
1444 1 : keyBuf := make([]keyspan.Key, 0, count)
1445 1 : for ik, val := it.First(); ik != nil; ik, val = it.Next() {
1446 1 : s, err := rangekey.Decode(*ik, val.InPlaceValue(), keyBuf)
1447 1 : if err != nil {
1448 0 : return err
1449 0 : }
1450 1 : keyBuf = s.Keys[len(s.Keys):]
1451 1 :
1452 1 : // Set a fixed capacity to avoid accidental overwriting.
1453 1 : s.Keys = s.Keys[:len(s.Keys):len(s.Keys)]
1454 1 : frag.Add(s)
1455 : }
1456 1 : frag.Finish()
1457 1 : return nil
1458 : }
1459 :
1460 : // Commit applies the batch to its parent writer.
1461 1 : func (b *Batch) Commit(o *WriteOptions) error {
1462 1 : return b.db.Apply(b, o)
1463 1 : }
1464 :
1465 : // Close closes the batch without committing it.
1466 1 : func (b *Batch) Close() error {
1467 1 : // The storage engine commit pipeline may retain a pointer to b.data beyond
1468 1 : // when Commit() returns. This is possible when configured for WAL failover;
1469 1 : // we don't know if we might need to read the batch data again until the
1470 1 : // batch has been durably synced [even if the committer doesn't care to wait
1471 1 : // for the sync and Sync()=false].
1472 1 : //
1473 1 : // We still want to recycle these batches. The b.lifecycle atomic negotiates
1474 1 : // the batch's lifecycle. If the commit pipeline still might read b.data,
1475           1 : 	// b.lifecycle will be nonzero [the low bits hold a ref count].
1476 1 : for {
1477 1 : v := b.lifecycle.Load()
1478 1 : switch {
1479 1 : case v == 0:
1480 1 : // A zero value indicates that the commit pipeline has no
1481 1 : // outstanding references to the batch. The commit pipeline is
1482 1 : // required to acquire a ref synchronously, so there is no risk that
1483 1 : // the commit pipeline will grab a ref after the call to release. We
1484 1 : // can simply release the batch.
1485 1 : b.release()
1486 1 : return nil
1487 1 : case (v & batchClosedBit) != 0:
1488           1 : 			// The batchClosedBit is set: this batch has already been closed.
1489 1 : return ErrClosed
1490 1 : default:
1491 1 : // There's an outstanding reference. Set the batch released bit so
1492 1 : // that the commit pipeline knows it should release the batch when
1493 1 : // it unrefs.
1494 1 : if b.lifecycle.CompareAndSwap(v, v|batchClosedBit) {
1495 1 : return nil
1496 1 : }
1497             : 			// The CAS failed, which indicates the outstanding reference was
1498             : 			// just decremented (or the caller illegally closed the batch
1499             : 			// twice). Loop to reload and retry.
1500 : }
1501 : }
1502 : }
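// The commit pipeline's unref path is not shown in this file. As a generic
// sketch of the counterpart this scheme implies (an editor's addition, not
// pebble's actual unref code), the final unref releases the batch only if the
// closed bit was set while references were still outstanding:
//
//	for {
//		v := b.lifecycle.Load()
//		if b.lifecycle.CompareAndSwap(v, v-1) {
//			if (v-1)&^batchClosedBit == 0 && v&batchClosedBit != 0 {
//				// Close ran while we held the last ref; release on its behalf.
//				b.release()
//			}
//			return
//		}
//	}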
1503 :
1504 : // Indexed returns true if the batch is indexed (i.e. supports read
1505 : // operations).
1506 1 : func (b *Batch) Indexed() bool {
1507 1 : return b.index != nil
1508 1 : }
1509 :
1510 : // init ensures that the batch data slice is initialized to meet the
1511 : // minimum required size and allocates space for the batch header.
1512 1 : func (b *Batch) init(size int) {
1513 1 : n := batchInitialSize
1514 1 : for n < size {
1515 1 : n *= 2
1516 1 : }
1517 1 : if cap(b.data) < n {
1518 1 : b.data = rawalloc.New(batchrepr.HeaderLen, n)
1519 1 : }
1520 1 : b.data = b.data[:batchrepr.HeaderLen]
1521           1 : 	clear(b.data) // Zero the header (sequence number and count)
1522 : }
1523 :
1524 : // Reset resets the batch for reuse. The underlying byte slice (that is
1525 : // returned by Repr()) may not be modified. It is only necessary to call this
1526             : // method if a batch is explicitly being reused. Close automatically takes care
1527 : // of releasing resources when appropriate for batches that are internally
1528 : // being reused.
1529 1 : func (b *Batch) Reset() {
1530 1 : // In some configurations (WAL failover) the commit pipeline may retain
1531 1 : // b.data beyond a call to commit the batch. When this happens, b.lifecycle
1532 1 : // is nonzero (see the comment above b.lifecycle). In this case it's unsafe
1533 1 : // to mutate b.data, so we discard it. Note that Reset must not be called on
1534 1 : // a closed batch, so v > 0 implies a non-zero ref count and not
1535 1 : // batchClosedBit being set.
1536 1 : if v := b.lifecycle.Load(); v > 0 {
1537 1 : b.data = nil
1538 1 : }
1539 1 : b.reset()
1540 : }
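// exampleBatchReuse is an illustrative sketch added by the editor; it is not
// part of the original source. It shows explicit reuse of a single Batch
// across commits via Reset, which retains the underlying buffer (up to
// batchMaxRetainedSize). The function name and the kvs parameter are
// hypothetical.
func exampleBatchReuse(db *DB, kvs map[string]string) error {
	b := db.NewBatch()
	defer b.Close()
	for k, v := range kvs {
		b.Reset() // clears prior contents but keeps the allocated buffer
		if err := b.Set([]byte(k), []byte(v), nil); err != nil {
			return err
		}
		if err := b.Commit(NoSync); err != nil {
			return err
		}
	}
	return nil
}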
1541 :
1542 1 : func (b *Batch) reset() {
1543 1 : // Zero out the struct, retaining only the fields necessary for manual
1544 1 : // reuse.
1545 1 : b.batchInternal = batchInternal{
1546 1 : data: b.data,
1547 1 : cmp: b.cmp,
1548 1 : formatKey: b.formatKey,
1549 1 : abbreviatedKey: b.abbreviatedKey,
1550 1 : index: b.index,
1551 1 : db: b.db,
1552 1 : }
1553 1 : b.applied.Store(false)
1554 1 : if b.data != nil {
1555 1 : if cap(b.data) > batchMaxRetainedSize {
1556 1 : // If the capacity of the buffer is larger than our maximum
1557 1 : // retention size, don't re-use it. Let it be GC-ed instead.
1558 1 : // This prevents the memory from an unusually large batch from
1559 1 : // being held on to indefinitely.
1560 1 : b.data = nil
1561 1 : } else {
1562 1 : // Otherwise, reset the buffer for re-use.
1563 1 : b.data = b.data[:batchrepr.HeaderLen]
1564 1 : clear(b.data)
1565 1 : }
1566 : }
1567 1 : if b.index != nil {
1568 1 : b.index.Init(&b.data, b.cmp, b.abbreviatedKey)
1569 1 : }
1570 : }
1571 :
1572 1 : func (b *Batch) grow(n int) {
1573 1 : newSize := len(b.data) + n
1574 1 : if uint64(newSize) >= maxBatchSize {
1575 1 : panic(ErrBatchTooLarge)
1576 : }
1577 1 : if newSize > cap(b.data) {
1578 1 : newCap := 2 * cap(b.data)
1579 1 : for newCap < newSize {
1580 1 : newCap *= 2
1581 1 : }
1582 1 : newData := rawalloc.New(len(b.data), newCap)
1583 1 : copy(newData, b.data)
1584 1 : b.data = newData
1585 : }
1586 1 : b.data = b.data[:newSize]
1587 : }
1588 :
1589 1 : func (b *Batch) setSeqNum(seqNum uint64) {
1590 1 : batchrepr.SetSeqNum(b.data, seqNum)
1591 1 : }
1592 :
1593 : // SeqNum returns the batch sequence number which is applied to the first
1594 : // record in the batch. The sequence number is incremented for each subsequent
1595 : // record. It returns zero if the batch is empty.
1596 1 : func (b *Batch) SeqNum() uint64 {
1597 1 : if len(b.data) == 0 {
1598 1 : b.init(batchrepr.HeaderLen)
1599 1 : }
1600 1 : return batchrepr.ReadSeqNum(b.data)
1601 : }
1602 :
1603 1 : func (b *Batch) setCount(v uint32) {
1604 1 : b.count = uint64(v)
1605 1 : }
1606 :
1607 : // Count returns the count of memtable-modifying operations in this batch. All
1608             : // operations with the exception of LogData increment this count. For IngestSSTs,
1609             : // count is only used to indicate the number of SSTs ingested in the record; the
1610 : // batch isn't applied to the memtable.
1611 1 : func (b *Batch) Count() uint32 {
1612 1 : if b.count > math.MaxUint32 {
1613 1 : panic(batchrepr.ErrInvalidBatch)
1614 : }
1615 1 : return uint32(b.count)
1616 : }
1617 :
1618 : // Reader returns a batchrepr.Reader for the current batch contents. If the
1619 : // batch is mutated, the new entries will not be visible to the reader.
1620 1 : func (b *Batch) Reader() batchrepr.Reader {
1621 1 : if len(b.data) == 0 {
1622 1 : b.init(batchrepr.HeaderLen)
1623 1 : }
1624 1 : return batchrepr.Read(b.data)
1625 : }
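// exampleReadBatchRecords is an illustrative sketch added by the editor; it
// is not part of the original source. It walks a batch's records with the
// batchrepr.Reader returned by Reader, mirroring how newFlushableBatch
// iterates over the repr below. The function name is hypothetical.
func exampleReadBatchRecords(b *Batch) error {
	for r := b.Reader(); len(r) > 0; {
		kind, ukey, value, ok, err := r.Next()
		if err != nil {
			return err
		}
		if !ok {
			break
		}
		fmt.Printf("%s %q = %q\n", kind, ukey, value)
	}
	return nil
}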
1626 :
1627 : // SyncWait is to be used in conjunction with DB.ApplyNoSyncWait.
1628 1 : func (b *Batch) SyncWait() error {
1629 1 : now := time.Now()
1630 1 : b.fsyncWait.Wait()
1631 1 : if b.commitErr != nil {
1632 0 : b.db = nil // prevent batch reuse on error
1633 0 : }
1634 1 : waitDuration := time.Since(now)
1635 1 : b.commitStats.CommitWaitDuration += waitDuration
1636 1 : b.commitStats.TotalDuration += waitDuration
1637 1 : return b.commitErr
1638 : }
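// exampleApplyNoSyncWait is an illustrative sketch added by the editor; it is
// not part of the original source. It shows the two-phase commit flow this
// method supports: ApplyNoSyncWait queues the batch and returns once it is
// applied, and SyncWait later blocks until the WAL sync completes. The
// function name is hypothetical.
func exampleApplyNoSyncWait(db *DB) error {
	b := db.NewBatch()
	if err := b.Set([]byte("k"), []byte("v"), nil); err != nil {
		return err
	}
	// Queue the batch with a sync request, but don't wait for the sync yet.
	if err := db.ApplyNoSyncWait(b, Sync); err != nil {
		return err
	}
	// ... other work can overlap with the WAL sync here ...
	if err := b.SyncWait(); err != nil {
		return err
	}
	_ = b.CommitStats() // includes the CommitWaitDuration spent in SyncWait
	return b.Close()
}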
1639 :
1640 : // CommitStats returns stats related to committing the batch. Should be called
1641             : // after Batch.Commit or DB.Apply. If DB.ApplyNoSyncWait is used, it should be
1642 : // called after Batch.SyncWait.
1643 1 : func (b *Batch) CommitStats() BatchCommitStats {
1644 1 : return b.commitStats
1645 1 : }
1646 :
1647 : // Note: batchIter mirrors the implementation of flushableBatchIter. Keep the
1648 : // two in sync.
1649 : type batchIter struct {
1650 : cmp Compare
1651 : batch *Batch
1652 : iter batchskl.Iterator
1653 : err error
1654 : // snapshot holds a batch "sequence number" at which the batch is being
1655 : // read. This sequence number has the InternalKeySeqNumBatch bit set, so it
1656 : // encodes an offset within the batch. Only batch entries earlier than the
1657 : // offset are visible during iteration.
1658 : snapshot uint64
1659 : }
1660 :
1661 : // batchIter implements the base.InternalIterator interface.
1662 : var _ base.InternalIterator = (*batchIter)(nil)
1663 :
1664 0 : func (i *batchIter) String() string {
1665 0 : return "batch"
1666 0 : }
1667 :
1668 1 : func (i *batchIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, base.LazyValue) {
1669 1 : // Ignore TrySeekUsingNext if the view of the batch changed.
1670 1 : if flags.TrySeekUsingNext() && flags.BatchJustRefreshed() {
1671 1 : flags = flags.DisableTrySeekUsingNext()
1672 1 : }
1673 :
1674 1 : i.err = nil // clear cached iteration error
1675 1 : ikey := i.iter.SeekGE(key, flags)
1676 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1677 0 : ikey = i.iter.Next()
1678 0 : }
1679 1 : if ikey == nil {
1680 1 : return nil, base.LazyValue{}
1681 1 : }
1682 1 : return ikey, base.MakeInPlaceValue(i.value())
1683 : }
1684 :
1685 : func (i *batchIter) SeekPrefixGE(
1686 : prefix, key []byte, flags base.SeekGEFlags,
1687 1 : ) (*base.InternalKey, base.LazyValue) {
1688 1 : i.err = nil // clear cached iteration error
1689 1 : return i.SeekGE(key, flags)
1690 1 : }
1691 :
1692 1 : func (i *batchIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, base.LazyValue) {
1693 1 : i.err = nil // clear cached iteration error
1694 1 : ikey := i.iter.SeekLT(key)
1695 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1696 0 : ikey = i.iter.Prev()
1697 0 : }
1698 1 : if ikey == nil {
1699 1 : return nil, base.LazyValue{}
1700 1 : }
1701 1 : return ikey, base.MakeInPlaceValue(i.value())
1702 : }
1703 :
1704 1 : func (i *batchIter) First() (*InternalKey, base.LazyValue) {
1705 1 : i.err = nil // clear cached iteration error
1706 1 : ikey := i.iter.First()
1707 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1708 1 : ikey = i.iter.Next()
1709 1 : }
1710 1 : if ikey == nil {
1711 1 : return nil, base.LazyValue{}
1712 1 : }
1713 1 : return ikey, base.MakeInPlaceValue(i.value())
1714 : }
1715 :
1716 1 : func (i *batchIter) Last() (*InternalKey, base.LazyValue) {
1717 1 : i.err = nil // clear cached iteration error
1718 1 : ikey := i.iter.Last()
1719 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1720 0 : ikey = i.iter.Prev()
1721 0 : }
1722 1 : if ikey == nil {
1723 0 : return nil, base.LazyValue{}
1724 0 : }
1725 1 : return ikey, base.MakeInPlaceValue(i.value())
1726 : }
1727 :
1728 1 : func (i *batchIter) Next() (*InternalKey, base.LazyValue) {
1729 1 : ikey := i.iter.Next()
1730 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1731 1 : ikey = i.iter.Next()
1732 1 : }
1733 1 : if ikey == nil {
1734 1 : return nil, base.LazyValue{}
1735 1 : }
1736 1 : return ikey, base.MakeInPlaceValue(i.value())
1737 : }
1738 :
1739 0 : func (i *batchIter) NextPrefix(succKey []byte) (*InternalKey, LazyValue) {
1740           0 : 	// Because NextPrefix was invoked, `succKey` must be ≥ the key at i's current
1741 0 : // position. Seek the arena iterator using TrySeekUsingNext.
1742 0 : ikey := i.iter.SeekGE(succKey, base.SeekGEFlagsNone.EnableTrySeekUsingNext())
1743 0 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1744 0 : ikey = i.iter.Next()
1745 0 : }
1746 0 : if ikey == nil {
1747 0 : return nil, base.LazyValue{}
1748 0 : }
1749 0 : return ikey, base.MakeInPlaceValue(i.value())
1750 : }
1751 :
1752 1 : func (i *batchIter) Prev() (*InternalKey, base.LazyValue) {
1753 1 : ikey := i.iter.Prev()
1754 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1755 0 : ikey = i.iter.Prev()
1756 0 : }
1757 1 : if ikey == nil {
1758 1 : return nil, base.LazyValue{}
1759 1 : }
1760 1 : return ikey, base.MakeInPlaceValue(i.value())
1761 : }
1762 :
1763 1 : func (i *batchIter) value() []byte {
1764 1 : offset, _, keyEnd := i.iter.KeyInfo()
1765 1 : data := i.batch.data
1766 1 : if len(data[offset:]) == 0 {
1767 0 : i.err = base.CorruptionErrorf("corrupted batch")
1768 0 : return nil
1769 0 : }
1770 :
1771 1 : switch InternalKeyKind(data[offset]) {
1772 : case InternalKeyKindSet, InternalKeyKindMerge, InternalKeyKindRangeDelete,
1773 : InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete,
1774 1 : InternalKeyKindDeleteSized:
1775 1 : _, value, ok := batchrepr.DecodeStr(data[keyEnd:])
1776 1 : if !ok {
1777 0 : return nil
1778 0 : }
1779 1 : return value
1780 1 : default:
1781 1 : return nil
1782 : }
1783 : }
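// For reference, each record in the batch repr is encoded as a one-byte kind,
// a varint-length-prefixed user key and, for the value-carrying kinds listed
// above, a varint-length-prefixed value. A minimal sketch of decoding one
// such record by hand (an editor's addition; data and offset are assumed to
// point at a valid value-carrying record):
//
//	rec := data[offset:]
//	kind := InternalKeyKind(rec[0])
//	klen, n := binary.Uvarint(rec[1:])
//	key := rec[1+n : 1+n+int(klen)]
//	vlen, m := binary.Uvarint(rec[1+n+int(klen):])
//	value := rec[1+n+int(klen)+m : 1+n+int(klen)+m+int(vlen)]
//	_, _, _ = kind, key, value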
1784 :
1785 1 : func (i *batchIter) Error() error {
1786 1 : return i.err
1787 1 : }
1788 :
1789 1 : func (i *batchIter) Close() error {
1790 1 : _ = i.iter.Close()
1791 1 : return i.err
1792 1 : }
1793 :
1794 0 : func (i *batchIter) SetBounds(lower, upper []byte) {
1795 0 : i.iter.SetBounds(lower, upper)
1796 0 : }
1797 :
1798 0 : func (i *batchIter) SetContext(_ context.Context) {}
1799 :
1800 : type flushableBatchEntry struct {
1801 : // offset is the byte offset of the record within the batch repr.
1802 : offset uint32
1803 : // index is the 0-based ordinal number of the record within the batch. Used
1804 : // to compute the seqnum for the record.
1805 : index uint32
1806 : // key{Start,End} are the start and end byte offsets of the key within the
1807 : // batch repr. Cached to avoid decoding the key length on every
1808 : // comparison. The value is stored starting at keyEnd.
1809 : keyStart uint32
1810 : keyEnd uint32
1811 : }
1812 :
1813 : // flushableBatch wraps an existing batch and provides the interfaces needed
1814 : // for making the batch flushable (i.e. able to mimic a memtable).
1815 : type flushableBatch struct {
1816 : cmp Compare
1817 : formatKey base.FormatKey
1818 : data []byte
1819 :
1820 : // The base sequence number for the entries in the batch. This is the same
1821 : // value as Batch.seqNum() and is cached here for performance.
1822 : seqNum uint64
1823 :
1824 : // A slice of offsets and indices for the entries in the batch. Used to
1825 : // implement flushableBatchIter. Unlike the indexing on a normal batch, a
1826 : // flushable batch is indexed such that batch entry i will be given the
1827 : // sequence number flushableBatch.seqNum+i.
1828 : //
1829 : // Sorted in increasing order of key and decreasing order of offset (since
1830 : // higher offsets correspond to higher sequence numbers).
1831 : //
1832 : // Does not include range deletion entries or range key entries.
1833 : offsets []flushableBatchEntry
1834 :
1835 : // Fragmented range deletion tombstones.
1836 : tombstones []keyspan.Span
1837 :
1838 : // Fragmented range keys.
1839 : rangeKeys []keyspan.Span
1840 : }
1841 :
1842 : var _ flushable = (*flushableBatch)(nil)
1843 :
1844 : // newFlushableBatch creates a new batch that implements the flushable
1845 : // interface. This allows the batch to act like a memtable and be placed in the
1846 : // queue of flushable memtables. Note that the flushable batch takes ownership
1847 : // of the batch data.
1848 1 : func newFlushableBatch(batch *Batch, comparer *Comparer) (*flushableBatch, error) {
1849 1 : b := &flushableBatch{
1850 1 : data: batch.data,
1851 1 : cmp: comparer.Compare,
1852 1 : formatKey: comparer.FormatKey,
1853 1 : offsets: make([]flushableBatchEntry, 0, batch.Count()),
1854 1 : }
1855 1 : if b.data != nil {
1856 1 : // Note that this sequence number is not correct when this batch has not
1857 1 : // been applied since the sequence number has not been assigned yet. The
1858 1 : // correct sequence number will be set later. But it is correct when the
1859 1 : // batch is being replayed from the WAL.
1860 1 : b.seqNum = batch.SeqNum()
1861 1 : }
1862 1 : var rangeDelOffsets []flushableBatchEntry
1863 1 : var rangeKeyOffsets []flushableBatchEntry
1864 1 : if len(b.data) > batchrepr.HeaderLen {
1865 1 : // Non-empty batch.
1866 1 : var index uint32
1867 1 : for iter := batchrepr.Read(b.data); len(iter) > 0; {
1868 1 : offset := uintptr(unsafe.Pointer(&iter[0])) - uintptr(unsafe.Pointer(&b.data[0]))
1869 1 : kind, key, _, ok, err := iter.Next()
1870 1 : if !ok {
1871 0 : if err != nil {
1872 0 : return nil, err
1873 0 : }
1874 0 : break
1875 : }
1876 1 : entry := flushableBatchEntry{
1877 1 : offset: uint32(offset),
1878 1 : index: uint32(index),
1879 1 : }
1880 1 : if keySize := uint32(len(key)); keySize == 0 {
1881 1 : // Must add 2 to the offset. One byte encodes `kind` and the next
1882 1 : // byte encodes `0`, which is the length of the key.
1883 1 : entry.keyStart = uint32(offset) + 2
1884 1 : entry.keyEnd = entry.keyStart
1885 1 : } else {
1886 1 : entry.keyStart = uint32(uintptr(unsafe.Pointer(&key[0])) -
1887 1 : uintptr(unsafe.Pointer(&b.data[0])))
1888 1 : entry.keyEnd = entry.keyStart + keySize
1889 1 : }
1890 1 : switch kind {
1891 1 : case InternalKeyKindRangeDelete:
1892 1 : rangeDelOffsets = append(rangeDelOffsets, entry)
1893 1 : case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
1894 1 : rangeKeyOffsets = append(rangeKeyOffsets, entry)
1895 1 : case InternalKeyKindLogData:
1896 1 : // Skip it; we never want to iterate over LogDatas.
1897 1 : continue
1898 : case InternalKeyKindSet, InternalKeyKindDelete, InternalKeyKindMerge,
1899 1 : InternalKeyKindSingleDelete, InternalKeyKindSetWithDelete, InternalKeyKindDeleteSized:
1900 1 : b.offsets = append(b.offsets, entry)
1901 0 : default:
1902           0 : 				// Note: In some circumstances this might be temporary memory
1903 0 : // corruption that can be recovered by discarding the batch and
1904 0 : // trying again. In other cases, the batch repr might've been
1905 0 : // already persisted elsewhere, and we'll loop continuously trying
1906 0 : // to commit the same corrupted batch. The caller is responsible for
1907 0 : // distinguishing.
1908 0 : return nil, errors.Wrapf(ErrInvalidBatch, "unrecognized kind %v", kind)
1909 : }
1910             : 			// NB: index (used for entry.index above) must not reach the
1911             : 			// batch.count, because the index is used in conjunction with the
1912             : 			// batch's sequence number to assign sequence numbers to keys within
1913             : 			// the batch. If we assign KV indexes as high as batch.count,
1914             : 			// we'll begin assigning keys sequence numbers that weren't
1915 : // allocated.
1916 1 : if index >= uint32(batch.count) {
1917 0 : return nil, base.AssertionFailedf("pebble: batch entry index %d ≥ batch.count %d", index, batch.count)
1918 0 : }
1919 1 : index++
1920 : }
1921 : }
1922 :
1923             : 	// Sort offsets, rangeDelOffsets, and rangeKeyOffsets, using the
1924             : 	// *flushableBatch's sort.Interface implementation.
1925 1 : pointOffsets := b.offsets
1926 1 : sort.Sort(b)
1927 1 : b.offsets = rangeDelOffsets
1928 1 : sort.Sort(b)
1929 1 : b.offsets = rangeKeyOffsets
1930 1 : sort.Sort(b)
1931 1 : b.offsets = pointOffsets
1932 1 :
1933 1 : if len(rangeDelOffsets) > 0 {
1934 1 : frag := &keyspan.Fragmenter{
1935 1 : Cmp: b.cmp,
1936 1 : Format: b.formatKey,
1937 1 : Emit: func(s keyspan.Span) {
1938 1 : b.tombstones = append(b.tombstones, s)
1939 1 : },
1940 : }
1941 1 : it := &flushableBatchIter{
1942 1 : batch: b,
1943 1 : data: b.data,
1944 1 : offsets: rangeDelOffsets,
1945 1 : cmp: b.cmp,
1946 1 : index: -1,
1947 1 : }
1948 1 : fragmentRangeDels(frag, it, len(rangeDelOffsets))
1949 : }
1950 1 : if len(rangeKeyOffsets) > 0 {
1951 1 : frag := &keyspan.Fragmenter{
1952 1 : Cmp: b.cmp,
1953 1 : Format: b.formatKey,
1954 1 : Emit: func(s keyspan.Span) {
1955 1 : b.rangeKeys = append(b.rangeKeys, s)
1956 1 : },
1957 : }
1958 1 : it := &flushableBatchIter{
1959 1 : batch: b,
1960 1 : data: b.data,
1961 1 : offsets: rangeKeyOffsets,
1962 1 : cmp: b.cmp,
1963 1 : index: -1,
1964 1 : }
1965 1 : fragmentRangeKeys(frag, it, len(rangeKeyOffsets))
1966 : }
1967 1 : return b, nil
1968 : }
1969 :
1970 1 : func (b *flushableBatch) setSeqNum(seqNum uint64) {
1971 1 : if b.seqNum != 0 {
1972 0 : panic(fmt.Sprintf("pebble: flushableBatch.seqNum already set: %d", b.seqNum))
1973 : }
1974 1 : b.seqNum = seqNum
1975 1 : for i := range b.tombstones {
1976 1 : for j := range b.tombstones[i].Keys {
1977 1 : b.tombstones[i].Keys[j].Trailer = base.MakeTrailer(
1978 1 : b.tombstones[i].Keys[j].SeqNum()+seqNum,
1979 1 : b.tombstones[i].Keys[j].Kind(),
1980 1 : )
1981 1 : }
1982 : }
1983 1 : for i := range b.rangeKeys {
1984 1 : for j := range b.rangeKeys[i].Keys {
1985 1 : b.rangeKeys[i].Keys[j].Trailer = base.MakeTrailer(
1986 1 : b.rangeKeys[i].Keys[j].SeqNum()+seqNum,
1987 1 : b.rangeKeys[i].Keys[j].Kind(),
1988 1 : )
1989 1 : }
1990 : }
1991 : }
1992 :
1993 1 : func (b *flushableBatch) Len() int {
1994 1 : return len(b.offsets)
1995 1 : }
1996 :
1997 1 : func (b *flushableBatch) Less(i, j int) bool {
1998 1 : ei := &b.offsets[i]
1999 1 : ej := &b.offsets[j]
2000 1 : ki := b.data[ei.keyStart:ei.keyEnd]
2001 1 : kj := b.data[ej.keyStart:ej.keyEnd]
2002 1 : switch c := b.cmp(ki, kj); {
2003 1 : case c < 0:
2004 1 : return true
2005 1 : case c > 0:
2006 1 : return false
2007 1 : default:
2008 1 : return ei.offset > ej.offset
2009 : }
2010 : }
2011 :
2012 1 : func (b *flushableBatch) Swap(i, j int) {
2013 1 : b.offsets[i], b.offsets[j] = b.offsets[j], b.offsets[i]
2014 1 : }
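// As an illustration of the ordering above (an editor's addition): if a batch
// Sets the key "a" twice, both entries have equal user keys, so Less falls
// through to comparing offsets in decreasing order. The later Set (larger
// offset, larger index, and therefore a larger assigned sequence number)
// sorts first, matching the convention that newer versions of a key precede
// older ones during iteration.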
2015 :
2016 : // newIter is part of the flushable interface.
2017 1 : func (b *flushableBatch) newIter(o *IterOptions) internalIterator {
2018 1 : return &flushableBatchIter{
2019 1 : batch: b,
2020 1 : data: b.data,
2021 1 : offsets: b.offsets,
2022 1 : cmp: b.cmp,
2023 1 : index: -1,
2024 1 : lower: o.GetLowerBound(),
2025 1 : upper: o.GetUpperBound(),
2026 1 : }
2027 1 : }
2028 :
2029 : // newFlushIter is part of the flushable interface.
2030 1 : func (b *flushableBatch) newFlushIter(o *IterOptions, bytesFlushed *uint64) internalIterator {
2031 1 : return &flushFlushableBatchIter{
2032 1 : flushableBatchIter: flushableBatchIter{
2033 1 : batch: b,
2034 1 : data: b.data,
2035 1 : offsets: b.offsets,
2036 1 : cmp: b.cmp,
2037 1 : index: -1,
2038 1 : },
2039 1 : bytesIterated: bytesFlushed,
2040 1 : }
2041 1 : }
2042 :
2043 : // newRangeDelIter is part of the flushable interface.
2044 1 : func (b *flushableBatch) newRangeDelIter(o *IterOptions) keyspan.FragmentIterator {
2045 1 : if len(b.tombstones) == 0 {
2046 1 : return nil
2047 1 : }
2048 1 : return keyspan.NewIter(b.cmp, b.tombstones)
2049 : }
2050 :
2051 : // newRangeKeyIter is part of the flushable interface.
2052 1 : func (b *flushableBatch) newRangeKeyIter(o *IterOptions) keyspan.FragmentIterator {
2053 1 : if len(b.rangeKeys) == 0 {
2054 1 : return nil
2055 1 : }
2056 1 : return keyspan.NewIter(b.cmp, b.rangeKeys)
2057 : }
2058 :
2059 : // containsRangeKeys is part of the flushable interface.
2060 0 : func (b *flushableBatch) containsRangeKeys() bool { return len(b.rangeKeys) > 0 }
2061 :
2062 : // inuseBytes is part of the flushable interface.
2063 1 : func (b *flushableBatch) inuseBytes() uint64 {
2064 1 : return uint64(len(b.data) - batchrepr.HeaderLen)
2065 1 : }
2066 :
2067 : // totalBytes is part of the flushable interface.
2068 1 : func (b *flushableBatch) totalBytes() uint64 {
2069 1 : return uint64(cap(b.data))
2070 1 : }
2071 :
2072 : // readyForFlush is part of the flushable interface.
2073 1 : func (b *flushableBatch) readyForFlush() bool {
2074 1 : // A flushable batch is always ready for flush; it must be flushed together
2075 1 : // with the previous memtable.
2076 1 : return true
2077 1 : }
2078 :
2079 : // computePossibleOverlaps is part of the flushable interface.
2080 : func (b *flushableBatch) computePossibleOverlaps(
2081 : fn func(bounded) shouldContinue, bounded ...bounded,
2082 1 : ) {
2083 1 : computePossibleOverlapsGenericImpl[*flushableBatch](b, b.cmp, fn, bounded)
2084 1 : }
2085 :
2086 : // Note: flushableBatchIter mirrors the implementation of batchIter. Keep the
2087 : // two in sync.
2088 : type flushableBatchIter struct {
2089 : // Members to be initialized by creator.
2090 : batch *flushableBatch
2091 : // The bytes backing the batch. Always the same as batch.data?
2092 : data []byte
2093 : // The sorted entries. This is not always equal to batch.offsets.
2094 : offsets []flushableBatchEntry
2095 : cmp Compare
2096 : // Must be initialized to -1. It is the index into offsets that represents
2097 : // the current iterator position.
2098 : index int
2099 :
2100 : // For internal use by the implementation.
2101 : key InternalKey
2102 : err error
2103 :
2104 : // Optionally initialize to bounds of iteration, if any.
2105 : lower []byte
2106 : upper []byte
2107 : }
2108 :
2109 : // flushableBatchIter implements the base.InternalIterator interface.
2110 : var _ base.InternalIterator = (*flushableBatchIter)(nil)
2111 :
2112 0 : func (i *flushableBatchIter) String() string {
2113 0 : return "flushable-batch"
2114 0 : }
2115 :
2116 : // SeekGE implements internalIterator.SeekGE, as documented in the pebble
2117 : // package. Ignore flags.TrySeekUsingNext() since we don't expect this
2118 : // optimization to provide much benefit here at the moment.
2119 : func (i *flushableBatchIter) SeekGE(
2120 : key []byte, flags base.SeekGEFlags,
2121 1 : ) (*InternalKey, base.LazyValue) {
2122 1 : i.err = nil // clear cached iteration error
2123 1 : ikey := base.MakeSearchKey(key)
2124 1 : i.index = sort.Search(len(i.offsets), func(j int) bool {
2125 1 : return base.InternalCompare(i.cmp, ikey, i.getKey(j)) <= 0
2126 1 : })
2127 1 : if i.index >= len(i.offsets) {
2128 1 : return nil, base.LazyValue{}
2129 1 : }
2130 1 : i.key = i.getKey(i.index)
2131 1 : if i.upper != nil && i.cmp(i.key.UserKey, i.upper) >= 0 {
2132 1 : i.index = len(i.offsets)
2133 1 : return nil, base.LazyValue{}
2134 1 : }
2135 1 : return &i.key, i.value()
2136 : }
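// lowerBoundIndex is an illustrative sketch added by the editor; it is not
// part of the original source. It isolates the lower-bound pattern SeekGE
// uses above: sort.Search returns the smallest index whose element is >= the
// target, or len(keys) when no such element exists. The function name is
// hypothetical.
func lowerBoundIndex(keys []string, target string) int {
	return sort.Search(len(keys), func(j int) bool {
		return keys[j] >= target
	})
}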
2137 :
2138 : // SeekPrefixGE implements internalIterator.SeekPrefixGE, as documented in the
2139 : // pebble package.
2140 : func (i *flushableBatchIter) SeekPrefixGE(
2141 : prefix, key []byte, flags base.SeekGEFlags,
2142 1 : ) (*base.InternalKey, base.LazyValue) {
2143 1 : return i.SeekGE(key, flags)
2144 1 : }
2145 :
2146 : // SeekLT implements internalIterator.SeekLT, as documented in the pebble
2147 : // package.
2148 : func (i *flushableBatchIter) SeekLT(
2149 : key []byte, flags base.SeekLTFlags,
2150 1 : ) (*InternalKey, base.LazyValue) {
2151 1 : i.err = nil // clear cached iteration error
2152 1 : ikey := base.MakeSearchKey(key)
2153 1 : i.index = sort.Search(len(i.offsets), func(j int) bool {
2154 1 : return base.InternalCompare(i.cmp, ikey, i.getKey(j)) <= 0
2155 1 : })
2156 1 : i.index--
2157 1 : if i.index < 0 {
2158 1 : return nil, base.LazyValue{}
2159 1 : }
2160 1 : i.key = i.getKey(i.index)
2161 1 : if i.lower != nil && i.cmp(i.key.UserKey, i.lower) < 0 {
2162 1 : i.index = -1
2163 1 : return nil, base.LazyValue{}
2164 1 : }
2165 1 : return &i.key, i.value()
2166 : }
2167 :
2168 : // First implements internalIterator.First, as documented in the pebble
2169 : // package.
2170 1 : func (i *flushableBatchIter) First() (*InternalKey, base.LazyValue) {
2171 1 : i.err = nil // clear cached iteration error
2172 1 : if len(i.offsets) == 0 {
2173 1 : return nil, base.LazyValue{}
2174 1 : }
2175 1 : i.index = 0
2176 1 : i.key = i.getKey(i.index)
2177 1 : if i.upper != nil && i.cmp(i.key.UserKey, i.upper) >= 0 {
2178 1 : i.index = len(i.offsets)
2179 1 : return nil, base.LazyValue{}
2180 1 : }
2181 1 : return &i.key, i.value()
2182 : }
2183 :
2184 : // Last implements internalIterator.Last, as documented in the pebble
2185 : // package.
2186 1 : func (i *flushableBatchIter) Last() (*InternalKey, base.LazyValue) {
2187 1 : i.err = nil // clear cached iteration error
2188 1 : if len(i.offsets) == 0 {
2189 0 : return nil, base.LazyValue{}
2190 0 : }
2191 1 : i.index = len(i.offsets) - 1
2192 1 : i.key = i.getKey(i.index)
2193 1 : if i.lower != nil && i.cmp(i.key.UserKey, i.lower) < 0 {
2194 1 : i.index = -1
2195 1 : return nil, base.LazyValue{}
2196 1 : }
2197 1 : return &i.key, i.value()
2198 : }
2199 :
2200 : // Note: flushFlushableBatchIter.Next mirrors the implementation of
2201             : // flushableBatchIter.Next for performance. Keep the two in sync.
2202 1 : func (i *flushableBatchIter) Next() (*InternalKey, base.LazyValue) {
2203 1 : if i.index == len(i.offsets) {
2204 0 : return nil, base.LazyValue{}
2205 0 : }
2206 1 : i.index++
2207 1 : if i.index == len(i.offsets) {
2208 1 : return nil, base.LazyValue{}
2209 1 : }
2210 1 : i.key = i.getKey(i.index)
2211 1 : if i.upper != nil && i.cmp(i.key.UserKey, i.upper) >= 0 {
2212 1 : i.index = len(i.offsets)
2213 1 : return nil, base.LazyValue{}
2214 1 : }
2215 1 : return &i.key, i.value()
2216 : }
2217 :
2218 1 : func (i *flushableBatchIter) Prev() (*InternalKey, base.LazyValue) {
2219 1 : if i.index < 0 {
2220 0 : return nil, base.LazyValue{}
2221 0 : }
2222 1 : i.index--
2223 1 : if i.index < 0 {
2224 1 : return nil, base.LazyValue{}
2225 1 : }
2226 1 : i.key = i.getKey(i.index)
2227 1 : if i.lower != nil && i.cmp(i.key.UserKey, i.lower) < 0 {
2228 1 : i.index = -1
2229 1 : return nil, base.LazyValue{}
2230 1 : }
2231 1 : return &i.key, i.value()
2232 : }
2233 :
2234 : // Note: flushFlushableBatchIter.NextPrefix mirrors the implementation of
2235             : // flushableBatchIter.NextPrefix for performance. Keep the two in sync.
2236 0 : func (i *flushableBatchIter) NextPrefix(succKey []byte) (*InternalKey, LazyValue) {
2237 0 : return i.SeekGE(succKey, base.SeekGEFlagsNone.EnableTrySeekUsingNext())
2238 0 : }
2239 :
2240 1 : func (i *flushableBatchIter) getKey(index int) InternalKey {
2241 1 : e := &i.offsets[index]
2242 1 : kind := InternalKeyKind(i.data[e.offset])
2243 1 : key := i.data[e.keyStart:e.keyEnd]
2244 1 : return base.MakeInternalKey(key, i.batch.seqNum+uint64(e.index), kind)
2245 1 : }
2246 :
2247 1 : func (i *flushableBatchIter) value() base.LazyValue {
2248 1 : p := i.data[i.offsets[i.index].offset:]
2249 1 : if len(p) == 0 {
2250 0 : i.err = base.CorruptionErrorf("corrupted batch")
2251 0 : return base.LazyValue{}
2252 0 : }
2253 1 : kind := InternalKeyKind(p[0])
2254 1 : if kind > InternalKeyKindMax {
2255 0 : i.err = base.CorruptionErrorf("corrupted batch")
2256 0 : return base.LazyValue{}
2257 0 : }
2258 1 : var value []byte
2259 1 : var ok bool
2260 1 : switch kind {
2261 : case InternalKeyKindSet, InternalKeyKindMerge, InternalKeyKindRangeDelete,
2262 : InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete,
2263 1 : InternalKeyKindDeleteSized:
2264 1 : keyEnd := i.offsets[i.index].keyEnd
2265 1 : _, value, ok = batchrepr.DecodeStr(i.data[keyEnd:])
2266 1 : if !ok {
2267 0 : i.err = base.CorruptionErrorf("corrupted batch")
2268 0 : return base.LazyValue{}
2269 0 : }
2270 : }
2271 1 : return base.MakeInPlaceValue(value)
2272 : }
2273 :
2274 0 : func (i *flushableBatchIter) Valid() bool {
2275 0 : return i.index >= 0 && i.index < len(i.offsets)
2276 0 : }
2277 :
2278 1 : func (i *flushableBatchIter) Error() error {
2279 1 : return i.err
2280 1 : }
2281 :
2282 1 : func (i *flushableBatchIter) Close() error {
2283 1 : return i.err
2284 1 : }
2285 :
2286 1 : func (i *flushableBatchIter) SetBounds(lower, upper []byte) {
2287 1 : i.lower = lower
2288 1 : i.upper = upper
2289 1 : }
2290 :
2291 0 : func (i *flushableBatchIter) SetContext(_ context.Context) {}
2292 :
2293 : // flushFlushableBatchIter is similar to flushableBatchIter but it keeps track
2294 : // of number of bytes iterated.
2295 : type flushFlushableBatchIter struct {
2296 : flushableBatchIter
2297 : bytesIterated *uint64
2298 : }
2299 :
2300 : // flushFlushableBatchIter implements the base.InternalIterator interface.
2301 : var _ base.InternalIterator = (*flushFlushableBatchIter)(nil)
2302 :
2303 0 : func (i *flushFlushableBatchIter) String() string {
2304 0 : return "flushable-batch"
2305 0 : }
2306 :
2307 : func (i *flushFlushableBatchIter) SeekGE(
2308 : key []byte, flags base.SeekGEFlags,
2309 0 : ) (*InternalKey, base.LazyValue) {
2310 0 : panic("pebble: SeekGE unimplemented")
2311 : }
2312 :
2313 : func (i *flushFlushableBatchIter) SeekPrefixGE(
2314 : prefix, key []byte, flags base.SeekGEFlags,
2315 0 : ) (*base.InternalKey, base.LazyValue) {
2316 0 : panic("pebble: SeekPrefixGE unimplemented")
2317 : }
2318 :
2319 : func (i *flushFlushableBatchIter) SeekLT(
2320 : key []byte, flags base.SeekLTFlags,
2321 0 : ) (*InternalKey, base.LazyValue) {
2322 0 : panic("pebble: SeekLT unimplemented")
2323 : }
2324 :
2325 1 : func (i *flushFlushableBatchIter) First() (*InternalKey, base.LazyValue) {
2326 1 : i.err = nil // clear cached iteration error
2327 1 : key, val := i.flushableBatchIter.First()
2328 1 : if key == nil {
2329 0 : return nil, base.LazyValue{}
2330 0 : }
2331 1 : entryBytes := i.offsets[i.index].keyEnd - i.offsets[i.index].offset
2332 1 : *i.bytesIterated += uint64(entryBytes) + i.valueSize()
2333 1 : return key, val
2334 : }
2335 :
2336 0 : func (i *flushFlushableBatchIter) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) {
2337           0 : 	panic("pebble: NextPrefix unimplemented")
2338 : }
2339 :
2340 : // Note: flushFlushableBatchIter.Next mirrors the implementation of
2341             : // flushableBatchIter.Next for performance. Keep the two in sync.
2342 1 : func (i *flushFlushableBatchIter) Next() (*InternalKey, base.LazyValue) {
2343 1 : if i.index == len(i.offsets) {
2344 0 : return nil, base.LazyValue{}
2345 0 : }
2346 1 : i.index++
2347 1 : if i.index == len(i.offsets) {
2348 1 : return nil, base.LazyValue{}
2349 1 : }
2350 1 : i.key = i.getKey(i.index)
2351 1 : entryBytes := i.offsets[i.index].keyEnd - i.offsets[i.index].offset
2352 1 : *i.bytesIterated += uint64(entryBytes) + i.valueSize()
2353 1 : return &i.key, i.value()
2354 : }
2355 :
2356 0 : func (i flushFlushableBatchIter) Prev() (*InternalKey, base.LazyValue) {
2357 0 : panic("pebble: Prev unimplemented")
2358 : }
2359 :
2360 1 : func (i flushFlushableBatchIter) valueSize() uint64 {
2361 1 : p := i.data[i.offsets[i.index].offset:]
2362 1 : if len(p) == 0 {
2363 0 : i.err = base.CorruptionErrorf("corrupted batch")
2364 0 : return 0
2365 0 : }
2366 1 : kind := InternalKeyKind(p[0])
2367 1 : if kind > InternalKeyKindMax {
2368 0 : i.err = base.CorruptionErrorf("corrupted batch")
2369 0 : return 0
2370 0 : }
2371 1 : var length uint64
2372 1 : switch kind {
2373 1 : case InternalKeyKindSet, InternalKeyKindMerge, InternalKeyKindRangeDelete:
2374 1 : keyEnd := i.offsets[i.index].keyEnd
2375 1 : v, n := binary.Uvarint(i.data[keyEnd:])
2376 1 : if n <= 0 {
2377 0 : i.err = base.CorruptionErrorf("corrupted batch")
2378 0 : return 0
2379 0 : }
2380 1 : length = v + uint64(n)
2381 : }
2382 1 : return length
2383 : }
2384 :
2385 : // batchSort returns iterators for the sorted contents of the batch. It is
2386 : // intended for testing use only. The batch.Sort dance is done to prevent
2387 : // exposing this method in the public pebble interface.
2388 : func batchSort(
2389 : i interface{},
2390 : ) (
2391 : points internalIterator,
2392 : rangeDels keyspan.FragmentIterator,
2393 : rangeKeys keyspan.FragmentIterator,
2394 1 : ) {
2395 1 : b := i.(*Batch)
2396 1 : if b.Indexed() {
2397 1 : pointIter := b.newInternalIter(nil)
2398 1 : rangeDelIter := b.newRangeDelIter(nil, math.MaxUint64)
2399 1 : rangeKeyIter := b.newRangeKeyIter(nil, math.MaxUint64)
2400 1 : return pointIter, rangeDelIter, rangeKeyIter
2401 1 : }
2402 1 : f, err := newFlushableBatch(b, b.db.opts.Comparer)
2403 1 : if err != nil {
2404 0 : panic(err)
2405 : }
2406 1 : return f.newIter(nil), f.newRangeDelIter(nil), f.newRangeKeyIter(nil)
2407 : }
2408 :
2409 1 : func init() {
2410 1 : private.BatchSort = batchSort
2411 1 : }
|