Line data Source code
1 : // Copyright 2012 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package pebble
6 :
7 : import (
8 : "bytes"
9 : "context"
10 : "encoding/binary"
11 : "fmt"
12 : "io"
13 : "math"
14 : "sort"
15 : "sync"
16 : "sync/atomic"
17 : "time"
18 : "unsafe"
19 :
20 : "github.com/cockroachdb/crlib/crtime"
21 : "github.com/cockroachdb/errors"
22 : "github.com/cockroachdb/pebble/batchrepr"
23 : "github.com/cockroachdb/pebble/internal/base"
24 : "github.com/cockroachdb/pebble/internal/batchskl"
25 : "github.com/cockroachdb/pebble/internal/humanize"
26 : "github.com/cockroachdb/pebble/internal/invariants"
27 : "github.com/cockroachdb/pebble/internal/keyspan"
28 : "github.com/cockroachdb/pebble/internal/private"
29 : "github.com/cockroachdb/pebble/internal/rangedel"
30 : "github.com/cockroachdb/pebble/internal/rangekey"
31 : "github.com/cockroachdb/pebble/internal/rawalloc"
32 : "github.com/cockroachdb/pebble/internal/treeprinter"
33 : )
34 :
35 : const (
36 : invalidBatchCount = 1<<32 - 1
37 : maxVarintLen32 = 5
38 :
39 : defaultBatchInitialSize = 1 << 10 // 1 KB
40 : defaultBatchMaxRetainedSize = 1 << 20 // 1 MB
41 : )
42 :
43 : // ErrNotIndexed means that a read operation on a batch failed because the
44 : // batch is not indexed and thus doesn't support reads.
45 : var ErrNotIndexed = errors.New("pebble: batch not indexed")
46 :
47 : // ErrInvalidBatch indicates that a batch is invalid or otherwise corrupted.
48 : var ErrInvalidBatch = batchrepr.ErrInvalidBatch
49 :
50 : // ErrBatchTooLarge indicates that a batch is invalid or otherwise corrupted.
51 : var ErrBatchTooLarge = base.MarkCorruptionError(errors.Newf("pebble: batch too large: >= %s", humanize.Bytes.Uint64(maxBatchSize)))
52 :
53 : // DeferredBatchOp represents a batch operation (e.g. set, merge, delete) that is
54 : // being inserted into the batch. Indexing is not performed on the specified key
55 : // until Finish is called, hence the name deferred. This struct lets the caller
56 : // copy or encode keys/values directly into the batch representation instead of
57 : // copying into an intermediary buffer then having pebble.Batch copy off of it.
58 : type DeferredBatchOp struct {
59 : index *batchskl.Skiplist
60 :
61 : // Key and Value point to parts of the binary batch representation where
62 : // keys and values should be encoded/copied into. len(Key) and len(Value)
63 : // bytes must be copied into these slices respectively before calling
64 : // Finish(). Changing where these slices point to is not allowed.
65 : Key, Value []byte
66 : offset uint32
67 : }
68 :
69 : // Finish completes the addition of this batch operation, and adds it to the
70 : // index if necessary. Must be called exactly once after keys/values
71 : // have been filled into Key and Value. Not calling Finish or not
72 : // copying/encoding keys will result in an incomplete index, and calling Finish
73 : // twice may result in a panic.
74 1 : func (d DeferredBatchOp) Finish() error {
75 1 : if d.index != nil {
76 0 : if err := d.index.Add(d.offset); err != nil {
77 0 : return err
78 0 : }
79 : }
80 1 : return nil
81 : }
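
// For illustration, a hedged sketch of the deferred-op flow: the caller
// reserves space with one of the *Deferred methods, encodes directly into the
// returned Key/Value slices, and then calls Finish. encodeKey and encodeValue
// are hypothetical stand-ins for the caller's own serialization.
//
//	op := batch.SetDeferred(keyLen, valueLen)
//	encodeKey(op.Key)     // must fill exactly keyLen bytes
//	encodeValue(op.Value) // must fill exactly valueLen bytes
//	if err := op.Finish(); err != nil {
//		return err
//	}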
82 :
83 : // A Batch is a sequence of Sets, Merges, Deletes, DeleteRanges, RangeKeySets,
84 : // RangeKeyUnsets, and/or RangeKeyDeletes that are applied atomically. Batch
85 : // implements the Reader interface, but only an indexed batch supports reading
86 : // (without error) via Get or NewIter. A non-indexed batch will return
87 : // ErrNotIndexed when read from. A batch is not safe for concurrent use, and
88 : // consumers should use a batch per goroutine or provide their own
89 : // synchronization.
90 : //
91 : // # Indexing
92 : //
93 : // Batches can be optionally indexed (see DB.NewIndexedBatch). An indexed batch
94 : // allows iteration via an Iterator (see Batch.NewIter). The iterator provides
95 : // a merged view of the operations in the batch and the underlying
96 : // database. This is implemented by treating the batch as an additional layer
97 : // in the LSM where every entry in the batch is considered newer than any entry
98 : // in the underlying database (batch entries have the InternalKeySeqNumBatch
99 : // bit set). By treating the batch as an additional layer in the LSM, iteration
100 : // supports all batch operations (i.e. Set, Merge, Delete, DeleteRange,
101 : // RangeKeySet, RangeKeyUnset, RangeKeyDelete) with minimal effort.
102 : //
103 : // The same key can be operated on multiple times in a batch, though only the
104 : // latest operation will be visible. For example, Set("a", "b"), Delete("a")
105 : // will cause the key "a" to not be visible in the batch. Set("a", "b"),
106 : // Set("a", "c") will cause a read of "a" to return the value "c".
107 : //
108 : // The batch index is implemented via a skiplist (internal/batchskl). While
109 : // the skiplist implementation is very fast, inserting into an indexed batch is
110 : // significantly slower than inserting into a non-indexed batch. Only use an
111 : // indexed batch if you require reading from it.
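//
// As a minimal sketch (assuming a *DB named db opened elsewhere), reading
// uncommitted writes back through an indexed batch looks like:
//
//	b := db.NewIndexedBatch()
//	_ = b.Set([]byte("a"), []byte("c"), nil)
//	iter, _ := b.NewIter(nil)
//	for iter.First(); iter.Valid(); iter.Next() {
//		// Observes "a" => "c" merged with the DB's committed state.
//	}
//	_ = iter.Close()
//	_ = b.Close()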
112 : //
113 : // # Atomic commit
114 : //
115 : // The operations in a batch are persisted by calling Batch.Commit which is
116 : // equivalent to calling DB.Apply(batch). A batch is committed atomically by
117 : // writing the internal batch representation to the WAL, adding all of the
118 : // batch operations to the memtable associated with the WAL, and then
119 : // incrementing the visible sequence number so that subsequent reads can see
120 : // the effects of the batch operations. If WriteOptions.Sync is true, a call to
121 : // Batch.Commit will guarantee that the batch is persisted to disk before
122 : // returning. See commitPipeline for more on the implementation details.
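//
// A hedged usage sketch (error handling elided):
//
//	b := db.NewBatch()
//	_ = b.Set([]byte("k"), []byte("v"), nil)
//	_ = b.Delete([]byte("old"), nil)
//	// Equivalent to db.Apply(b, Sync): persists the batch, waiting for the
//	// WAL fsync before returning.
//	_ = b.Commit(Sync)
//	_ = b.Close()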
123 : //
124 : // # Large batches
125 : //
126 : // The size of a batch is limited only by available memory (be aware that
127 : // indexed batches require considerable additional memory for the skiplist
128 : // structure). A given WAL file has a single memtable associated with it (this
129 : // restriction could be removed, but doing so is onerous and complex). And a
130 : // memtable has a fixed size due to the underlying fixed size arena. Note that
131 : // this differs from RocksDB where a memtable can grow arbitrarily large using
132 : // a list of arena chunks. In RocksDB this is accomplished by storing pointers
133 : // in the arena memory, but that isn't possible in Go.
134 : //
135 : // During Batch.Commit, a batch which is larger than a threshold (>
136 : // MemTableSize/2) is wrapped in a flushableBatch and inserted into the queue
137 : // of memtables. A flushableBatch forces the WAL to be rotated, but that happens
138 : // anyway when the memtable becomes full, so this does not cause significant
139 : // WAL churn. Because the flushableBatch is readable as another layer in the
140 : // LSM, Batch.Commit returns as soon as the flushableBatch has been added to
141 : // the queue of memtables.
142 : //
143 : // Internally, a flushableBatch provides Iterator support by sorting the batch
144 : // contents (the batch is sorted once, when it is added to the memtable
145 : // queue). Sorting the batch contents and insertion of the contents into a
146 : // memtable have the same big-O time, but the constant factor dominates
147 : // here. Sorting is significantly faster and uses significantly less memory.
148 : //
149 : // # Internal representation
150 : //
151 : // The internal batch representation is a contiguous byte buffer with a fixed
152 : // 12-byte header, followed by a series of records.
153 : //
154 : // +-------------+------------+--- ... ---+
155 : // | SeqNum (8B) | Count (4B) | Entries |
156 : // +-------------+------------+--- ... ---+
157 : //
158 : // Each record has a 1-byte kind tag prefix, followed by 1 or 2 length prefixed
159 : // strings (varstring):
160 : //
161 : // +-----------+-----------------+-------------------+
162 : // | Kind (1B) | Key (varstring) | Value (varstring) |
163 : // +-----------+-----------------+-------------------+
164 : //
165 : // A varstring is a varint32 followed by N bytes of data. The Kind tags are
166 : // exactly those specified by InternalKeyKind. The following table shows the
167 : // format for records of each kind:
168 : //
169 : // InternalKeyKindDelete varstring
170 : // InternalKeyKindLogData varstring
171 : // InternalKeyKindIngestSST varstring
172 : // InternalKeyKindSet varstring varstring
173 : // InternalKeyKindMerge varstring varstring
174 : // InternalKeyKindRangeDelete varstring varstring
175 : // InternalKeyKindRangeKeySet varstring varstring
176 : // InternalKeyKindRangeKeyUnset varstring varstring
177 : // InternalKeyKindRangeKeyDelete varstring varstring
178 : //
179 : // The intuitive understanding here is that the arguments to Delete, Set,
180 : // Merge, DeleteRange and RangeKeyDelete are encoded into the batch. The
181 : // RangeKeySet and RangeKeyUnset operations are slightly more complicated,
182 : // encoding their end key, suffix and value [in the case of RangeKeySet] within
183 : // the Value varstring. For more information on the value encoding for
184 : // RangeKeySet and RangeKeyUnset, see the internal/rangekey package.
185 : //
186 : // The internal batch representation is the on disk format for a batch in the
187 : // WAL, and thus stable. New record kinds may be added, but the existing ones
188 : // will not be modified.
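//
// As a hedged sketch of the header layout above, decoded with encoding/binary
// from the slice returned by Batch.Repr (both fields are little-endian):
//
//	repr := b.Repr()
//	seqNum := binary.LittleEndian.Uint64(repr[:8])
//	count := binary.LittleEndian.Uint32(repr[8:12])
//	// Entries follow at repr[12:], each starting with a 1-byte kind tag.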
189 : type Batch struct {
190 : batchInternal
191 : applied atomic.Bool
192 : // lifecycle is used to negotiate the lifecycle of a Batch. A Batch and its
193 : // underlying batchInternal.data byte slice may be reused. There are two
194 : // mechanisms for reuse:
195 : //
196 : // 1. The caller may explicitly call [Batch.Reset] to reset the batch to be
197 : // empty (while retaining the underlying repr's buffer).
198 : // 2. The caller may call [Batch.Close], passing ownership off to Pebble,
199 : // which may reuse the batch's memory to service new callers to
200 : // [DB.NewBatch].
201 : //
202 : // There's a complication to reuse: When WAL failover is configured, the
203 : // Pebble commit pipeline may retain a pointer to the batch.data beyond the
204 : // return of [Batch.Commit]. The user of the Batch may commit their batch
205 : // and call Close or Reset before the commit pipeline is finished reading
206 : // the data slice. Recycling immediately would cause a data race.
207 : //
208 : // To resolve this data race, this [lifecycle] atomic is used to determine
209 : // safety and responsibility of reusing a batch. The low bits of the atomic
210 : // are used as a reference count (really just the lowest bit; in practice
211 : // there's only one code path that takes a reference). The [Batch] is passed into
212 : // [wal.Writer]'s WriteRecord method as a [RefCount] implementation. The
213 : // wal.Writer guarantees that if it will read [Batch.data] after the call to
214 : // WriteRecord returns, it will increment the reference count. When it's
215 : // complete, it'll unreference through invoking [Batch.Unref].
216 : //
217 : // When the committer of a batch indicates intent to recycle a Batch through
218 : // calling [Batch.Reset] or [Batch.Close], the lifecycle atomic is read. If
219 : // an outstanding reference remains, it's unsafe to reuse Batch.data yet. In
220 : // [Batch.Reset] the caller wants to reuse the [Batch] immediately, so we
221 : // discard b.data to recycle the struct but not the underlying byte slice.
222 : // In [Batch.Close], we set a special high bit [batchClosedBit] on lifecycle
223 : // that indicates that the user will not use [Batch] again and we're free to
224 : // recycle it when safe. When the commit pipeline eventually calls
225 : // [Batch.Unref], the [batchClosedBit] is noticed and the batch is
226 : // recycled.
227 : lifecycle atomic.Int32
228 : }
229 :
230 : // batchClosedBit is a bit stored on Batch.lifecycle to indicate that the user
231 : // called [Batch.Close] to release a Batch, but an open reference count
232 : // prevented immediate recycling.
233 : const batchClosedBit = 1 << 30
234 :
235 : // TODO(jackson): Hide the wal.RefCount implementation from the public Batch interface.
236 :
237 : // Ref implements wal.RefCount. If the WAL writer may need to read b.data after
238 : // it returns, it invokes Ref to increment the lifecycle's reference count. When
239 : // it's finished, it invokes Unref.
240 1 : func (b *Batch) Ref() {
241 1 : b.lifecycle.Add(+1)
242 1 : }
243 :
244 : // Unref implements wal.RefCount.
245 1 : func (b *Batch) Unref() {
246 1 : if v := b.lifecycle.Add(-1); (v ^ batchClosedBit) == 0 {
247 1 : // The [batchClosedBit] high bit is set, and there are no outstanding
248 1 : // references. The user of the Batch called [Batch.Close], expecting the
249 1 : // batch to be recycled. However, our outstanding reference count
250 1 : // prevented recycling. As the last to dereference, we're now
251 1 : // responsible for releasing the batch.
252 1 : b.lifecycle.Store(0)
253 1 : b.release()
254 1 : }
255 : }
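
// To make the arithmetic in Unref concrete: if Close ran while one WAL
// reference was outstanding, lifecycle holds batchClosedBit|1. The WAL
// writer's Unref then computes v = batchClosedBit, so v^batchClosedBit == 0
// and this call releases the batch. If the batch was never closed, the same
// decrement yields v = 0, v^batchClosedBit != 0, and recycling is left to a
// later Close or Reset.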
256 :
257 : // batchInternal contains the set of fields within Batch that are non-atomic and
258 : // capable of being reset using a *b = batchInternal{} struct copy.
259 : type batchInternal struct {
260 : // Data is the wire format of a batch's log entry:
261 : // - 8 bytes for a sequence number of the first batch element,
262 : // or zeroes if the batch has not yet been applied,
263 : // - 4 bytes for the count: the number of elements in the batch,
264 : // or "\xff\xff\xff\xff" if the batch is invalid,
265 : // - count elements, being:
266 : // - one byte for the kind
267 : // - the varint-string user key,
268 : // - the varint-string value (if kind != delete).
269 : // The sequence number and count are stored in little-endian order.
270 : //
271 : // The data field can be (but is not guaranteed to be) nil for new
272 : // batches. Large batches will set the data field to nil when committed as
273 : // the data has been moved to a flushableBatch and inserted into the queue of
274 : // memtables.
275 : data []byte
276 : comparer *base.Comparer
277 : opts batchOptions
278 :
279 : // An upper bound on required space to add this batch to a memtable.
280 : // Note that although batches are limited to 4 GiB in size, that limit
281 : // applies to len(data), not the memtable size. The upper bound on the
282 : // size of a memtable node is larger than the overhead of the batch's log
283 : // encoding, so memTableSize is larger than len(data) and may overflow a
284 : // uint32.
285 : memTableSize uint64
286 :
287 : // The db to which the batch will be committed. Do not change this field
288 : // after the batch has been created as it might invalidate internal state.
289 : // Batch.memTableSize is only refreshed if Batch.db is set. Setting db to
290 : // nil once it has been set implies that the Batch has encountered an error.
291 : db *DB
292 :
293 : // The count of records in the batch. This count will be stored in the batch
294 : // data whenever Repr() is called.
295 : count uint64
296 :
297 : // The count of range deletions in the batch. Updated every time a range
298 : // deletion is added.
299 : countRangeDels uint64
300 :
301 : // The count of range key sets, unsets and deletes in the batch. Updated
302 : // every time a RANGEKEYSET, RANGEKEYUNSET or RANGEKEYDEL key is added.
303 : countRangeKeys uint64
304 :
305 : // A deferredOp struct, stored in the Batch so that a pointer can be returned
306 : // from the *Deferred() methods rather than a value.
307 : deferredOp DeferredBatchOp
308 :
309 : // An optional skiplist keyed by offset into data of the entry.
310 : index *batchskl.Skiplist
311 : rangeDelIndex *batchskl.Skiplist
312 : rangeKeyIndex *batchskl.Skiplist
313 :
314 : // Fragmented range deletion tombstones. Cached the first time a range
315 : // deletion iterator is requested. The cache is invalidated whenever a new
316 : // range deletion is added to the batch. This cache can only be used when
317 : // opening an iterator to read at a batch sequence number >=
318 : // tombstonesSeqNum. This is the case for all new iterators created over a
319 : // batch but it's not the case for all cloned iterators.
320 : tombstones []keyspan.Span
321 : tombstonesSeqNum base.SeqNum
322 :
323 : // Fragmented range key spans. Cached the first time a range key iterator is
324 : // requested. The cache is invalidated whenever a new range key
325 : // (RangeKey{Set,Unset,Del}) is added to the batch. This cache can only be
326 : // used when opening an iterator to read at a batch sequence number >=
327 : // tombstonesSeqNum. This is the case for all new iterators created over a
328 : // batch but it's not the case for all cloned iterators.
329 : rangeKeys []keyspan.Span
330 : rangeKeysSeqNum base.SeqNum
331 :
332 : // The flushableBatch wrapper if the batch is too large to fit in the
333 : // memtable.
334 : flushable *flushableBatch
335 :
336 : // minimumFormatMajorVersion indicates the format major version required in
337 : // order to commit this batch. If an operation requires a particular format
338 : // major version, it ratchets the batch's minimumFormatMajorVersion. When
339 : // the batch is committed, this is validated against the database's current
340 : // format major version.
341 : minimumFormatMajorVersion FormatMajorVersion
342 :
343 : // Synchronous Apply uses the commit WaitGroup for both publishing the
344 : // seqnum and waiting for the WAL fsync (if needed). Asynchronous
345 : // ApplyNoSyncWait, which implies WriteOptions.Sync is true, uses the commit
346 : // WaitGroup for publishing the seqnum and the fsyncWait WaitGroup for
347 : // waiting for the WAL fsync.
348 : //
349 : // TODO(sumeer): if we find that ApplyNoSyncWait in conjunction with
350 : // SyncWait is causing higher memory usage because of the time duration
351 : // between when the sync is already done, and a goroutine calls SyncWait
352 : // (followed by Batch.Close), we could separate out {fsyncWait, commitErr}
353 : // into a separate struct that is allocated separately (using another
354 : // sync.Pool), and only that struct needs to outlive Batch.Close (which
355 : // could then be called immediately after ApplyNoSyncWait). commitStats
356 : // will also need to be in this separate struct.
357 : commit sync.WaitGroup
358 : fsyncWait sync.WaitGroup
359 :
360 : commitStats BatchCommitStats
361 :
362 : commitErr error
363 :
364 : // Position bools together to reduce the size of the struct.
365 :
366 : // ingestedSSTBatch indicates that the batch contains one or more key kinds
367 : // of InternalKeyKindIngestSST. If the batch contains key kinds of IngestSST
368 : // then it will only contain key kinds of IngestSST.
369 : ingestedSSTBatch bool
370 :
371 : // committing is set to true when a batch begins to commit. It's used to
372 : // ensure the batch is not mutated concurrently. It is not an atomic
373 : // deliberately, so as to avoid the overhead on batch mutations. This is
374 : // okay, because under correct usage this field will never be accessed
375 : // concurrently. It's only under incorrect usage the memory accesses of this
376 : // variable may violate memory safety. Since we don't use atomics here,
377 : // false negatives are possible.
378 : committing bool
379 : }
380 :
381 : // BatchCommitStats exposes stats related to committing a batch.
382 : //
383 : // NB: there is no Pebble internal tracing (using LoggerAndTracer) of slow
384 : // batch commits. The caller can use these stats to do their own tracing as
385 : // needed.
386 : type BatchCommitStats struct {
387 : // TotalDuration is the time spent in DB.{Apply,ApplyNoSyncWait} or
388 : // Batch.Commit, plus the time waiting in Batch.SyncWait. If there is a gap
389 : // between calling ApplyNoSyncWait and calling SyncWait, that gap could
390 : // include some duration in which real work was being done for the commit
391 : // and will not be included here. This missing time is considered acceptable
392 : // since the goal of these stats is to understand user-facing latency.
393 : //
394 : // TotalDuration includes time spent in various queues both inside Pebble
395 : // and outside Pebble (I/O queues, goroutine scheduler queue, mutex wait
396 : // etc.). For some of these queues (which we consider important) the wait
397 : // times are included below -- these expose low-level implementation detail
398 : // and are meant for expert diagnosis and subject to change. There may be
399 : // unaccounted time after subtracting those values from TotalDuration.
400 : TotalDuration time.Duration
401 : // SemaphoreWaitDuration is the wait time for semaphores in
402 : // commitPipeline.Commit.
403 : SemaphoreWaitDuration time.Duration
404 : // WALQueueWaitDuration is the wait time for allocating memory blocks in the
405 : // LogWriter (due to the LogWriter not writing fast enough). At the moment
406 : // this duration is always zero because a single WAL will allow
407 : // allocating memory blocks up to the entire memtable size. In the future,
408 : // we may pipeline WALs and bound the WAL queued blocks separately, so this
409 : // field is preserved for that possibility.
410 : WALQueueWaitDuration time.Duration
411 : // MemTableWriteStallDuration is the wait caused by a write stall due to too
412 : // many memtables (due to not flushing fast enough).
413 : MemTableWriteStallDuration time.Duration
414 : // L0ReadAmpWriteStallDuration is the wait caused by a write stall due to
415 : // high read amplification in L0 (due to not compacting fast enough out of
416 : // L0).
417 : L0ReadAmpWriteStallDuration time.Duration
418 : // WALRotationDuration is the wait time for WAL rotation, which includes
419 : // syncing and closing the old WAL and creating (or reusing) a new one.
420 : WALRotationDuration time.Duration
421 : // CommitWaitDuration is the wait for publishing the seqnum plus the
422 : // duration for the WAL sync (if requested). The former should be tiny and
423 : // one can assume that this is all due to the WAL sync.
424 : CommitWaitDuration time.Duration
425 : }
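
// A hedged usage sketch (assuming the exported CommitStats accessor on Batch;
// the 100ms threshold is purely illustrative):
//
//	if err := b.Commit(Sync); err != nil {
//		return err
//	}
//	stats := b.CommitStats()
//	if stats.TotalDuration > 100*time.Millisecond {
//		log.Printf("slow batch commit: %+v", stats)
//	}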
426 :
427 : var _ Reader = (*Batch)(nil)
428 : var _ Writer = (*Batch)(nil)
429 :
430 : var batchPool = sync.Pool{
431 1 : New: func() interface{} {
432 1 : return &Batch{}
433 1 : },
434 : }
435 :
436 : type indexedBatch struct {
437 : batch Batch
438 : index batchskl.Skiplist
439 : }
440 :
441 : var indexedBatchPool = sync.Pool{
442 1 : New: func() interface{} {
443 1 : return &indexedBatch{}
444 1 : },
445 : }
446 :
447 1 : func newBatch(db *DB, opts ...BatchOption) *Batch {
448 1 : b := batchPool.Get().(*Batch)
449 1 : b.db = db
450 1 : b.opts.ensureDefaults()
451 1 : for _, opt := range opts {
452 1 : opt(&b.opts)
453 1 : }
454 1 : return b
455 : }
456 :
457 1 : func newBatchWithSize(db *DB, size int, opts ...BatchOption) *Batch {
458 1 : b := newBatch(db, opts...)
459 1 : if cap(b.data) < size {
460 1 : b.data = rawalloc.New(0, size)
461 1 : }
462 1 : return b
463 : }
464 :
465 1 : func newIndexedBatch(db *DB, comparer *Comparer) *Batch {
466 1 : i := indexedBatchPool.Get().(*indexedBatch)
467 1 : i.batch.comparer = comparer
468 1 : i.batch.db = db
469 1 : i.batch.index = &i.index
470 1 : i.batch.index.Init(&i.batch.data, comparer.Compare, comparer.AbbreviatedKey)
471 1 : i.batch.opts.ensureDefaults()
472 1 : return &i.batch
473 1 : }
474 :
475 0 : func newIndexedBatchWithSize(db *DB, comparer *Comparer, size int) *Batch {
476 0 : b := newIndexedBatch(db, comparer)
477 0 : if cap(b.data) < size {
478 0 : b.data = rawalloc.New(0, size)
479 0 : }
480 0 : return b
481 : }
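
// These constructors back the exported DB.NewBatch and DB.NewIndexedBatch (and
// their *WithSize variants). A rough sketch of how a caller picks between
// them; NewBatchWithSize is assumed here to be the exported counterpart of
// newBatchWithSize:
//
//	b := db.NewBatch()                  // write-only batch
//	ib := db.NewIndexedBatch()          // additionally supports Get and NewIter
//	big := db.NewBatchWithSize(1 << 20) // pre-sizes the underlying buffer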
482 :
483 : // nextSeqNum returns the batch "sequence number" that will be given to the next
484 : // key written to the batch. During iteration keys within an indexed batch are
485 : // given a sequence number consisting of their offset within the batch combined
486 : // with the base.SeqNumBatchBit bit. These sequence numbers are only
487 : // used during iteration, and the keys are assigned ordinary sequence numbers
488 : // when the batch is committed.
489 1 : func (b *Batch) nextSeqNum() base.SeqNum {
490 1 : return base.SeqNum(len(b.data)) | base.SeqNumBatchBit
491 1 : }
492 :
493 1 : func (b *Batch) release() {
494 1 : if b.db == nil {
495 1 : // The batch was not created using newBatch or newIndexedBatch, or an error
496 1 : // was encountered. We don't try to reuse batches that encountered an error
497 1 : // because they might be stuck somewhere in the system and attempting to
498 1 : // reuse such batches is a recipe for onerous debugging sessions. Instead,
499 1 : // let the GC do its job.
500 1 : return
501 1 : }
502 1 : b.db = nil
503 1 :
504 1 : // NB: This is ugly (it would be cleaner if we could just assign a Batch{}),
505 1 : // but necessary so that we can use atomic.StoreUint32 for the Batch.applied
506 1 : // field. Without using an atomic to clear that field the Go race detector
507 1 : // complains.
508 1 : b.reset()
509 1 : b.comparer = nil
510 1 :
511 1 : if b.index == nil {
512 1 : batchPool.Put(b)
513 1 : } else {
514 1 : b.index, b.rangeDelIndex, b.rangeKeyIndex = nil, nil, nil
515 1 : indexedBatchPool.Put((*indexedBatch)(unsafe.Pointer(b)))
516 1 : }
517 : }
518 :
519 1 : func (b *Batch) refreshMemTableSize() error {
520 1 : b.memTableSize = 0
521 1 : if len(b.data) < batchrepr.HeaderLen {
522 1 : return nil
523 1 : }
524 :
525 1 : b.countRangeDels = 0
526 1 : b.countRangeKeys = 0
527 1 : b.minimumFormatMajorVersion = 0
528 1 : for r := b.Reader(); ; {
529 1 : kind, key, value, ok, err := r.Next()
530 1 : if !ok {
531 1 : if err != nil {
532 0 : return err
533 0 : }
534 1 : break
535 : }
536 1 : switch kind {
537 1 : case InternalKeyKindRangeDelete:
538 1 : b.countRangeDels++
539 1 : case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
540 1 : b.countRangeKeys++
541 1 : case InternalKeyKindSet, InternalKeyKindDelete, InternalKeyKindMerge, InternalKeyKindSingleDelete, InternalKeyKindSetWithDelete:
542 : // fallthrough
543 1 : case InternalKeyKindDeleteSized:
544 1 : if b.minimumFormatMajorVersion < FormatDeleteSizedAndObsolete {
545 1 : b.minimumFormatMajorVersion = FormatDeleteSizedAndObsolete
546 1 : }
547 1 : case InternalKeyKindLogData:
548 1 : // LogData does not contribute to memtable size.
549 1 : continue
550 1 : case InternalKeyKindIngestSST:
551 1 : if b.minimumFormatMajorVersion < FormatFlushableIngest {
552 1 : b.minimumFormatMajorVersion = FormatFlushableIngest
553 1 : }
554 : // This key kind doesn't contribute to the memtable size.
555 1 : continue
556 0 : case InternalKeyKindExcise:
557 0 : if b.minimumFormatMajorVersion < FormatFlushableIngestExcises {
558 0 : b.minimumFormatMajorVersion = FormatFlushableIngestExcises
559 0 : }
560 : // This key kind doesn't contribute to the memtable size.
561 0 : continue
562 0 : default:
563 0 : // Note In some circumstances this might be temporary memory
564 0 : // corruption that can be recovered by discarding the batch and
565 0 : // trying again. In other cases, the batch repr might've been
566 0 : // already persisted elsewhere, and we'll loop continuously trying
567 0 : // to commit the same corrupted batch. The caller is responsible for
568 0 : // distinguishing.
569 0 : return errors.Wrapf(ErrInvalidBatch, "unrecognized kind %v", kind)
570 : }
571 1 : b.memTableSize += memTableEntrySize(len(key), len(value))
572 : }
573 1 : return nil
574 : }
575 :
576 : // Apply the operations contained in the batch to the receiver batch.
577 : //
578 : // It is safe to modify the contents of the arguments after Apply returns.
579 : //
580 : // Apply returns ErrInvalidBatch if the provided batch is invalid in any way.
581 1 : func (b *Batch) Apply(batch *Batch, _ *WriteOptions) error {
582 1 : if b.ingestedSSTBatch {
583 0 : panic("pebble: invalid batch application")
584 : }
585 1 : if len(batch.data) == 0 {
586 0 : return nil
587 0 : }
588 1 : if len(batch.data) < batchrepr.HeaderLen {
589 0 : return ErrInvalidBatch
590 0 : }
591 :
592 1 : offset := len(b.data)
593 1 : if offset == 0 {
594 1 : b.init(offset)
595 1 : offset = batchrepr.HeaderLen
596 1 : }
597 1 : b.data = append(b.data, batch.data[batchrepr.HeaderLen:]...)
598 1 :
599 1 : b.setCount(b.Count() + batch.Count())
600 1 :
601 1 : if b.db != nil || b.index != nil {
602 1 : // Only iterate over the new entries if we need to track memTableSize or in
603 1 : // order to update the index.
604 1 : for iter := batchrepr.Reader(b.data[offset:]); len(iter) > 0; {
605 1 : offset := uintptr(unsafe.Pointer(&iter[0])) - uintptr(unsafe.Pointer(&b.data[0]))
606 1 : kind, key, value, ok, err := iter.Next()
607 1 : if !ok {
608 0 : if err != nil {
609 0 : return err
610 0 : }
611 0 : break
612 : }
613 1 : switch kind {
614 1 : case InternalKeyKindRangeDelete:
615 1 : b.countRangeDels++
616 1 : case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
617 1 : b.countRangeKeys++
618 0 : case InternalKeyKindIngestSST, InternalKeyKindExcise:
619 0 : panic("pebble: invalid key kind for batch")
620 1 : case InternalKeyKindLogData:
621 1 : // LogData does not contribute to memtable size.
622 1 : continue
623 : case InternalKeyKindSet, InternalKeyKindDelete, InternalKeyKindMerge,
624 1 : InternalKeyKindSingleDelete, InternalKeyKindSetWithDelete, InternalKeyKindDeleteSized:
625 : // fallthrough
626 0 : default:
627 0 : // Note In some circumstances this might be temporary memory
628 0 : // corruption that can be recovered by discarding the batch and
629 0 : // trying again. In other cases, the batch repr might've been
630 0 : // already persisted elsewhere, and we'll loop continuously
631 0 : // trying to commit the same corrupted batch. The caller is
632 0 : // responsible for distinguishing.
633 0 : return errors.Wrapf(ErrInvalidBatch, "unrecognized kind %v", kind)
634 : }
635 1 : if b.index != nil {
636 1 : var err error
637 1 : switch kind {
638 1 : case InternalKeyKindRangeDelete:
639 1 : b.tombstones = nil
640 1 : b.tombstonesSeqNum = 0
641 1 : if b.rangeDelIndex == nil {
642 1 : b.rangeDelIndex = batchskl.NewSkiplist(&b.data, b.comparer.Compare, b.comparer.AbbreviatedKey)
643 1 : }
644 1 : err = b.rangeDelIndex.Add(uint32(offset))
645 1 : case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
646 1 : b.rangeKeys = nil
647 1 : b.rangeKeysSeqNum = 0
648 1 : if b.rangeKeyIndex == nil {
649 1 : b.rangeKeyIndex = batchskl.NewSkiplist(&b.data, b.comparer.Compare, b.comparer.AbbreviatedKey)
650 1 : }
651 1 : err = b.rangeKeyIndex.Add(uint32(offset))
652 1 : default:
653 1 : err = b.index.Add(uint32(offset))
654 : }
655 1 : if err != nil {
656 0 : return err
657 0 : }
658 : }
659 1 : b.memTableSize += memTableEntrySize(len(key), len(value))
660 : }
661 : }
662 1 : return nil
663 : }
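
// A hedged sketch of Apply: folding one batch's operations into another so
// they commit together (error handling elided):
//
//	b1 := db.NewBatch()
//	_ = b1.Set([]byte("a"), []byte("1"), nil)
//	b2 := db.NewBatch()
//	_ = b2.Set([]byte("b"), []byte("2"), nil)
//	_ = b1.Apply(b2, nil) // b1 now contains both sets
//	_ = b1.Commit(Sync)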
664 :
665 : // Get gets the value for the given key. It returns ErrNotFound if the Batch
666 : // does not contain the key.
667 : //
668 : // The caller should not modify the contents of the returned slice, but it is
669 : // safe to modify the contents of the argument after Get returns. The returned
670 : // slice will remain valid until the returned Closer is closed. On success, the
671 : // caller MUST call closer.Close() or a memory leak will occur.
672 1 : func (b *Batch) Get(key []byte) ([]byte, io.Closer, error) {
673 1 : if b.index == nil {
674 0 : return nil, nil, ErrNotIndexed
675 0 : }
676 1 : return b.db.getInternal(key, b, nil /* snapshot */)
677 : }
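
// A minimal usage sketch for Get on an indexed batch; the returned Closer must
// be closed once the value is no longer needed ("use" is a stand-in for the
// caller's own logic):
//
//	value, closer, err := b.Get([]byte("a"))
//	if err != nil {
//		return err // ErrNotFound if the key is absent
//	}
//	use(value)
//	_ = closer.Close()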
678 :
679 1 : func (b *Batch) prepareDeferredKeyValueRecord(keyLen, valueLen int, kind InternalKeyKind) {
680 1 : if b.committing {
681 0 : panic("pebble: batch already committing")
682 : }
683 1 : if len(b.data) == 0 {
684 1 : b.init(keyLen + valueLen + 2*binary.MaxVarintLen64 + batchrepr.HeaderLen)
685 1 : }
686 1 : b.count++
687 1 : b.memTableSize += memTableEntrySize(keyLen, valueLen)
688 1 :
689 1 : pos := len(b.data)
690 1 : b.deferredOp.offset = uint32(pos)
691 1 : b.grow(1 + 2*maxVarintLen32 + keyLen + valueLen)
692 1 : b.data[pos] = byte(kind)
693 1 : pos++
694 1 :
695 1 : {
696 1 : // TODO(peter): Manually inlined version binary.PutUvarint(). This is 20%
697 1 : // faster on BenchmarkBatchSet on go1.13. Remove if go1.14 or future
698 1 : // versions show this to not be a performance win.
699 1 : x := uint32(keyLen)
700 1 : for x >= 0x80 {
701 1 : b.data[pos] = byte(x) | 0x80
702 1 : x >>= 7
703 1 : pos++
704 1 : }
705 1 : b.data[pos] = byte(x)
706 1 : pos++
707 : }
708 :
709 1 : b.deferredOp.Key = b.data[pos : pos+keyLen]
710 1 : pos += keyLen
711 1 :
712 1 : {
713 1 : // TODO(peter): Manually inlined version binary.PutUvarint(). This is 20%
714 1 : // faster on BenchmarkBatchSet on go1.13. Remove if go1.14 or future
715 1 : // versions show this to not be a performance win.
716 1 : x := uint32(valueLen)
717 1 : for x >= 0x80 {
718 1 : b.data[pos] = byte(x) | 0x80
719 1 : x >>= 7
720 1 : pos++
721 1 : }
722 1 : b.data[pos] = byte(x)
723 1 : pos++
724 : }
725 :
726 1 : b.deferredOp.Value = b.data[pos : pos+valueLen]
727 1 : // Shrink data since varints may be shorter than the upper bound.
728 1 : b.data = b.data[:pos+valueLen]
729 : }
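
// The manually inlined loops above write the same bytes as binary.PutUvarint
// for lengths that fit in a uint32; a rough equivalence sketch:
//
//	var buf [binary.MaxVarintLen32]byte
//	n := binary.PutUvarint(buf[:], uint64(keyLen))
//	// buf[:n] matches the varint emitted by the loop above for the same keyLen.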
730 :
731 1 : func (b *Batch) prepareDeferredKeyRecord(keyLen int, kind InternalKeyKind) {
732 1 : if b.committing {
733 0 : panic("pebble: batch already committing")
734 : }
735 1 : if len(b.data) == 0 {
736 1 : b.init(keyLen + binary.MaxVarintLen64 + batchrepr.HeaderLen)
737 1 : }
738 1 : b.count++
739 1 : b.memTableSize += memTableEntrySize(keyLen, 0)
740 1 :
741 1 : pos := len(b.data)
742 1 : b.deferredOp.offset = uint32(pos)
743 1 : b.grow(1 + maxVarintLen32 + keyLen)
744 1 : b.data[pos] = byte(kind)
745 1 : pos++
746 1 :
747 1 : {
748 1 : // TODO(peter): Manually inlined version binary.PutUvarint(). Remove if
749 1 : // go1.13 or future versions show this to not be a performance win. See
750 1 : // BenchmarkBatchSet.
751 1 : x := uint32(keyLen)
752 1 : for x >= 0x80 {
753 0 : b.data[pos] = byte(x) | 0x80
754 0 : x >>= 7
755 0 : pos++
756 0 : }
757 1 : b.data[pos] = byte(x)
758 1 : pos++
759 : }
760 :
761 1 : b.deferredOp.Key = b.data[pos : pos+keyLen]
762 1 : b.deferredOp.Value = nil
763 1 :
764 1 : // Shrink data since varint may be shorter than the upper bound.
765 1 : b.data = b.data[:pos+keyLen]
766 : }
767 :
768 : // AddInternalKey allows the caller to add an internal key of point key or range
769 : // key kinds (but not RangeDelete) to a batch. Passing in an internal key of
770 : // kind RangeDelete will result in a panic. Note that the seqnum in the internal
771 : // key is effectively ignored, even though the Kind is preserved. This is
772 : // because the batch format does not allow for a per-key seqnum to be specified,
773 : // only a batch-wide one.
774 : //
775 : // Note that non-indexed keys (InternalKeyKind{LogData,IngestSST}) are not
776 : // supported with this method as they require specialized logic.
777 1 : func (b *Batch) AddInternalKey(key *base.InternalKey, value []byte, _ *WriteOptions) error {
778 1 : keyLen := len(key.UserKey)
779 1 : hasValue := false
780 1 : switch kind := key.Kind(); kind {
781 0 : case InternalKeyKindRangeDelete:
782 0 : panic("unexpected range delete in AddInternalKey")
783 1 : case InternalKeyKindSingleDelete, InternalKeyKindDelete:
784 1 : b.prepareDeferredKeyRecord(keyLen, kind)
785 1 : b.deferredOp.index = b.index
786 1 : case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
787 1 : b.prepareDeferredKeyValueRecord(keyLen, len(value), kind)
788 1 : hasValue = true
789 1 : b.incrementRangeKeysCount()
790 1 : default:
791 1 : b.prepareDeferredKeyValueRecord(keyLen, len(value), kind)
792 1 : hasValue = true
793 1 : b.deferredOp.index = b.index
794 : }
795 1 : copy(b.deferredOp.Key, key.UserKey)
796 1 : if hasValue {
797 1 : copy(b.deferredOp.Value, value)
798 1 : }
799 :
800 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
801 : // in go1.13 will remove the need for this.
802 1 : if b.index != nil {
803 0 : if err := b.index.Add(b.deferredOp.offset); err != nil {
804 0 : return err
805 0 : }
806 : }
807 1 : return nil
808 : }
809 :
810 : // Set adds an action to the batch that sets the key to map to the value.
811 : //
812 : // It is safe to modify the contents of the arguments after Set returns.
813 1 : func (b *Batch) Set(key, value []byte, _ *WriteOptions) error {
814 1 : deferredOp := b.SetDeferred(len(key), len(value))
815 1 : copy(deferredOp.Key, key)
816 1 : copy(deferredOp.Value, value)
817 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
818 1 : // in go1.13 will remove the need for this.
819 1 : if b.index != nil {
820 1 : if err := b.index.Add(deferredOp.offset); err != nil {
821 0 : return err
822 0 : }
823 : }
824 1 : return nil
825 : }
826 :
827 : // SetDeferred is similar to Set in that it adds a set operation to the batch,
828 : // except it only takes in key/value lengths instead of complete slices,
829 : // letting the caller encode into those objects and then call Finish() on the
830 : // returned object.
831 1 : func (b *Batch) SetDeferred(keyLen, valueLen int) *DeferredBatchOp {
832 1 : b.prepareDeferredKeyValueRecord(keyLen, valueLen, InternalKeyKindSet)
833 1 : b.deferredOp.index = b.index
834 1 : return &b.deferredOp
835 1 : }
836 :
837 : // Merge adds an action to the batch that merges the value at key with the new
838 : // value. The details of the merge are dependent upon the configured merge
839 : // operator.
840 : //
841 : // It is safe to modify the contents of the arguments after Merge returns.
842 1 : func (b *Batch) Merge(key, value []byte, _ *WriteOptions) error {
843 1 : deferredOp := b.MergeDeferred(len(key), len(value))
844 1 : copy(deferredOp.Key, key)
845 1 : copy(deferredOp.Value, value)
846 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
847 1 : // in go1.13 will remove the need for this.
848 1 : if b.index != nil {
849 1 : if err := b.index.Add(deferredOp.offset); err != nil {
850 0 : return err
851 0 : }
852 : }
853 1 : return nil
854 : }
855 :
856 : // MergeDeferred is similar to Merge in that it adds a merge operation to the
857 : // batch, except it only takes in key/value lengths instead of complete slices,
858 : // letting the caller encode into those objects and then call Finish() on the
859 : // returned object.
860 1 : func (b *Batch) MergeDeferred(keyLen, valueLen int) *DeferredBatchOp {
861 1 : b.prepareDeferredKeyValueRecord(keyLen, valueLen, InternalKeyKindMerge)
862 1 : b.deferredOp.index = b.index
863 1 : return &b.deferredOp
864 1 : }
865 :
866 : // Delete adds an action to the batch that deletes the entry for key.
867 : //
868 : // It is safe to modify the contents of the arguments after Delete returns.
869 1 : func (b *Batch) Delete(key []byte, _ *WriteOptions) error {
870 1 : deferredOp := b.DeleteDeferred(len(key))
871 1 : copy(deferredOp.Key, key)
872 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
873 1 : // in go1.13 will remove the need for this.
874 1 : if b.index != nil {
875 1 : if err := b.index.Add(deferredOp.offset); err != nil {
876 0 : return err
877 0 : }
878 : }
879 1 : return nil
880 : }
881 :
882 : // DeleteDeferred is similar to Delete in that it adds a delete operation to
883 : // the batch, except it only takes in key/value lengths instead of complete
884 : // slices, letting the caller encode into those objects and then call Finish()
885 : // on the returned object.
886 1 : func (b *Batch) DeleteDeferred(keyLen int) *DeferredBatchOp {
887 1 : b.prepareDeferredKeyRecord(keyLen, InternalKeyKindDelete)
888 1 : b.deferredOp.index = b.index
889 1 : return &b.deferredOp
890 1 : }
891 :
892 : // DeleteSized behaves identically to Delete, but takes an additional
893 : // argument indicating the size of the value being deleted. DeleteSized
894 : // should be preferred when the caller has the expectation that there exists
895 : // a single internal KV pair for the key (eg, the key has not been
896 : // overwritten recently), and the caller knows the size of its value.
897 : //
898 : // DeleteSized will record the value size within the tombstone and use it to
899 : // inform compaction-picking heuristics which strive to reduce space
900 : // amplification in the LSM. This "calling your shot" mechanic allows the
901 : // storage engine to more accurately estimate and reduce space amplification.
902 : //
903 : // It is safe to modify the contents of the arguments after DeleteSized
904 : // returns.
905 1 : func (b *Batch) DeleteSized(key []byte, deletedValueSize uint32, _ *WriteOptions) error {
906 1 : deferredOp := b.DeleteSizedDeferred(len(key), deletedValueSize)
907 1 : copy(b.deferredOp.Key, key)
908 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Check if in a
909 1 : // later Go release this is unnecessary.
910 1 : if b.index != nil {
911 1 : if err := b.index.Add(deferredOp.offset); err != nil {
912 0 : return err
913 0 : }
914 : }
915 1 : return nil
916 : }
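
// A hedged usage sketch of "calling your shot": the caller passes the size it
// expects the deleted value to have (512 bytes here is purely illustrative):
//
//	if err := b.DeleteSized([]byte("blob/42"), 512, nil); err != nil {
//		return err
//	}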
917 :
918 : // DeleteSizedDeferred is similar to DeleteSized in that it adds a sized delete
919 : // operation to the batch, except it only takes in key length instead of a
920 : // complete key slice, letting the caller encode into the DeferredBatchOp.Key
921 : // slice and then call Finish() on the returned object.
922 1 : func (b *Batch) DeleteSizedDeferred(keyLen int, deletedValueSize uint32) *DeferredBatchOp {
923 1 : if b.minimumFormatMajorVersion < FormatDeleteSizedAndObsolete {
924 1 : b.minimumFormatMajorVersion = FormatDeleteSizedAndObsolete
925 1 : }
926 :
927 : // Encode the sum of the key length and the value in the value.
928 1 : v := uint64(deletedValueSize) + uint64(keyLen)
929 1 :
930 1 : // Encode `v` as a varint.
931 1 : var buf [binary.MaxVarintLen64]byte
932 1 : n := 0
933 1 : {
934 1 : x := v
935 1 : for x >= 0x80 {
936 1 : buf[n] = byte(x) | 0x80
937 1 : x >>= 7
938 1 : n++
939 1 : }
940 1 : buf[n] = byte(x)
941 1 : n++
942 : }
943 :
944 : // NB: In batch entries and sstable entries, values are stored as
945 : // varstrings. Here, the value is itself a simple varint. This results in an
946 : // unnecessary double layer of encoding:
947 : // varint(n) varint(deletedValueSize)
948 : // The first varint will always be 1-byte, since a varint-encoded uint64
949 : // will never exceed 128 bytes. This unnecessary extra byte and wrapping is
950 : // preserved to avoid special casing across the database, and in particular
951 : // in sstable block decoding which is performance sensitive.
952 1 : b.prepareDeferredKeyValueRecord(keyLen, n, InternalKeyKindDeleteSized)
953 1 : b.deferredOp.index = b.index
954 1 : copy(b.deferredOp.Value, buf[:n])
955 1 : return &b.deferredOp
956 : }
957 :
958 : // SingleDelete adds an action to the batch that single deletes the entry for key.
959 : // WARNING: See the detailed warning in Writer.SingleDelete before using this.
960 : //
961 : // It is safe to modify the contents of the arguments after SingleDelete returns.
962 1 : func (b *Batch) SingleDelete(key []byte, _ *WriteOptions) error {
963 1 : deferredOp := b.SingleDeleteDeferred(len(key))
964 1 : copy(deferredOp.Key, key)
965 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
966 1 : // in go1.13 will remove the need for this.
967 1 : if b.index != nil {
968 1 : if err := b.index.Add(deferredOp.offset); err != nil {
969 0 : return err
970 0 : }
971 : }
972 1 : return nil
973 : }
974 :
975 : // SingleDeleteDeferred is similar to SingleDelete in that it adds a single delete
976 : // operation to the batch, except it only takes in key/value lengths instead of
977 : // complete slices, letting the caller encode into those objects and then call
978 : // Finish() on the returned object.
979 : //
980 : // WARNING: See the detailed warning in Writer.SingleDelete before using this.
981 1 : func (b *Batch) SingleDeleteDeferred(keyLen int) *DeferredBatchOp {
982 1 : b.prepareDeferredKeyRecord(keyLen, InternalKeyKindSingleDelete)
983 1 : b.deferredOp.index = b.index
984 1 : return &b.deferredOp
985 1 : }
986 :
987 : // DeleteRange deletes all of the point keys (and values) in the range
988 : // [start,end) (inclusive on start, exclusive on end). DeleteRange does NOT
989 : // delete overlapping range keys (eg, keys set via RangeKeySet).
990 : //
991 : // It is safe to modify the contents of the arguments after DeleteRange
992 : // returns.
993 1 : func (b *Batch) DeleteRange(start, end []byte, _ *WriteOptions) error {
994 1 : deferredOp := b.DeleteRangeDeferred(len(start), len(end))
995 1 : copy(deferredOp.Key, start)
996 1 : copy(deferredOp.Value, end)
997 1 : // TODO(peter): Manually inline DeferredBatchOp.Finish(). Mid-stack inlining
998 1 : // in go1.13 will remove the need for this.
999 1 : if deferredOp.index != nil {
1000 1 : if err := deferredOp.index.Add(deferredOp.offset); err != nil {
1001 0 : return err
1002 0 : }
1003 : }
1004 1 : return nil
1005 : }
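
// A minimal sketch: deleting every point key with a given prefix by covering
// it with a half-open range (the end key is an assumed prefix successor):
//
//	if err := b.DeleteRange([]byte("user/"), []byte("user0"), nil); err != nil {
//		return err
//	}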
1006 :
1007 : // DeleteRangeDeferred is similar to DeleteRange in that it adds a delete range
1008 : // operation to the batch, except it only takes in key lengths instead of
1009 : // complete slices, letting the caller encode into those objects and then call
1010 : // Finish() on the returned object. Note that DeferredBatchOp.Key should be
1011 : // populated with the start key, and DeferredBatchOp.Value should be populated
1012 : // with the end key.
1013 1 : func (b *Batch) DeleteRangeDeferred(startLen, endLen int) *DeferredBatchOp {
1014 1 : b.prepareDeferredKeyValueRecord(startLen, endLen, InternalKeyKindRangeDelete)
1015 1 : b.countRangeDels++
1016 1 : if b.index != nil {
1017 1 : b.tombstones = nil
1018 1 : b.tombstonesSeqNum = 0
1019 1 : // Range deletions are rare, so we lazily allocate the index for them.
1020 1 : if b.rangeDelIndex == nil {
1021 1 : b.rangeDelIndex = batchskl.NewSkiplist(&b.data, b.comparer.Compare, b.comparer.AbbreviatedKey)
1022 1 : }
1023 1 : b.deferredOp.index = b.rangeDelIndex
1024 : }
1025 1 : return &b.deferredOp
1026 : }
1027 :
1028 : // RangeKeySet sets a range key mapping the key range [start, end) at the MVCC
1029 : // timestamp suffix to value. The suffix is optional. If any portion of the key
1030 : // range [start, end) is already set by a range key with the same suffix value,
1031 : // RangeKeySet overrides it.
1032 : //
1033 : // It is safe to modify the contents of the arguments after RangeKeySet returns.
1034 1 : func (b *Batch) RangeKeySet(start, end, suffix, value []byte, _ *WriteOptions) error {
1035 1 : if invariants.Enabled && b.db != nil {
1036 1 : // RangeKeySet is only supported on prefix keys.
1037 1 : if b.db.opts.Comparer.Split(start) != len(start) {
1038 0 : panic("RangeKeySet called with suffixed start key")
1039 : }
1040 1 : if b.db.opts.Comparer.Split(end) != len(end) {
1041 0 : panic("RangeKeySet called with suffixed end key")
1042 : }
1043 : }
1044 1 : suffixValues := [1]rangekey.SuffixValue{{Suffix: suffix, Value: value}}
1045 1 : internalValueLen := rangekey.EncodedSetValueLen(end, suffixValues[:])
1046 1 :
1047 1 : deferredOp := b.rangeKeySetDeferred(len(start), internalValueLen)
1048 1 : copy(deferredOp.Key, start)
1049 1 : n := rangekey.EncodeSetValue(deferredOp.Value, end, suffixValues[:])
1050 1 : if n != internalValueLen {
1051 0 : panic("unexpected internal value length mismatch")
1052 : }
1053 :
1054 : // Manually inline DeferredBatchOp.Finish().
1055 1 : if deferredOp.index != nil {
1056 1 : if err := deferredOp.index.Add(deferredOp.offset); err != nil {
1057 0 : return err
1058 0 : }
1059 : }
1060 1 : return nil
1061 : }
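
// A hedged sketch: setting a range key over ["a", "z") with an MVCC timestamp
// suffix (the "@5" suffix encoding is purely illustrative; real suffixes are
// defined by the Comparer in use):
//
//	err := b.RangeKeySet([]byte("a"), []byte("z"), []byte("@5"), []byte("v"), nil)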
1062 :
1063 1 : func (b *Batch) rangeKeySetDeferred(startLen, internalValueLen int) *DeferredBatchOp {
1064 1 : b.prepareDeferredKeyValueRecord(startLen, internalValueLen, InternalKeyKindRangeKeySet)
1065 1 : b.incrementRangeKeysCount()
1066 1 : return &b.deferredOp
1067 1 : }
1068 :
1069 1 : func (b *Batch) incrementRangeKeysCount() {
1070 1 : b.countRangeKeys++
1071 1 : if b.index != nil {
1072 1 : b.rangeKeys = nil
1073 1 : b.rangeKeysSeqNum = 0
1074 1 : // Range keys are rare, so we lazily allocate the index for them.
1075 1 : if b.rangeKeyIndex == nil {
1076 1 : b.rangeKeyIndex = batchskl.NewSkiplist(&b.data, b.comparer.Compare, b.comparer.AbbreviatedKey)
1077 1 : }
1078 1 : b.deferredOp.index = b.rangeKeyIndex
1079 : }
1080 : }
1081 :
1082 : // RangeKeyUnset removes a range key mapping the key range [start, end) at the
1083 : // MVCC timestamp suffix. The suffix may be omitted to remove an unsuffixed
1084 : // range key. RangeKeyUnset only removes portions of range keys that fall within
1085 : // the [start, end) key span, and only range keys with suffixes that exactly
1086 : // match the unset suffix.
1087 : //
1088 : // It is safe to modify the contents of the arguments after RangeKeyUnset
1089 : // returns.
1090 1 : func (b *Batch) RangeKeyUnset(start, end, suffix []byte, _ *WriteOptions) error {
1091 1 : if invariants.Enabled && b.db != nil {
1092 1 : // RangeKeyUnset is only supported on prefix keys.
1093 1 : if b.db.opts.Comparer.Split(start) != len(start) {
1094 0 : panic("RangeKeyUnset called with suffixed start key")
1095 : }
1096 1 : if b.db.opts.Comparer.Split(end) != len(end) {
1097 0 : panic("RangeKeyUnset called with suffixed end key")
1098 : }
1099 : }
1100 1 : suffixes := [1][]byte{suffix}
1101 1 : internalValueLen := rangekey.EncodedUnsetValueLen(end, suffixes[:])
1102 1 :
1103 1 : deferredOp := b.rangeKeyUnsetDeferred(len(start), internalValueLen)
1104 1 : copy(deferredOp.Key, start)
1105 1 : n := rangekey.EncodeUnsetValue(deferredOp.Value, end, suffixes[:])
1106 1 : if n != internalValueLen {
1107 0 : panic("unexpected internal value length mismatch")
1108 : }
1109 :
1110 : // Manually inline DeferredBatchOp.Finish()
1111 1 : if deferredOp.index != nil {
1112 1 : if err := deferredOp.index.Add(deferredOp.offset); err != nil {
1113 0 : return err
1114 0 : }
1115 : }
1116 1 : return nil
1117 : }
1118 :
1119 1 : func (b *Batch) rangeKeyUnsetDeferred(startLen, internalValueLen int) *DeferredBatchOp {
1120 1 : b.prepareDeferredKeyValueRecord(startLen, internalValueLen, InternalKeyKindRangeKeyUnset)
1121 1 : b.incrementRangeKeysCount()
1122 1 : return &b.deferredOp
1123 1 : }
1124 :
1125 : // RangeKeyDelete deletes all of the range keys in the range [start,end)
1126 : // (inclusive on start, exclusive on end). It does not delete point keys (for
1127 : // that use DeleteRange). RangeKeyDelete removes all range keys within the
1128 : // bounds, including those with or without suffixes.
1129 : //
1130 : // It is safe to modify the contents of the arguments after RangeKeyDelete
1131 : // returns.
1132 1 : func (b *Batch) RangeKeyDelete(start, end []byte, _ *WriteOptions) error {
1133 1 : if invariants.Enabled && b.db != nil {
1134 1 : // RangeKeyDelete is only supported on prefix keys.
1135 1 : if b.db.opts.Comparer.Split(start) != len(start) {
1136 0 : panic("RangeKeyDelete called with suffixed start key")
1137 : }
1138 1 : if b.db.opts.Comparer.Split(end) != len(end) {
1139 0 : panic("RangeKeyDelete called with suffixed end key")
1140 : }
1141 : }
1142 1 : deferredOp := b.RangeKeyDeleteDeferred(len(start), len(end))
1143 1 : copy(deferredOp.Key, start)
1144 1 : copy(deferredOp.Value, end)
1145 1 : // Manually inline DeferredBatchOp.Finish().
1146 1 : if deferredOp.index != nil {
1147 1 : if err := deferredOp.index.Add(deferredOp.offset); err != nil {
1148 0 : return err
1149 0 : }
1150 : }
1151 1 : return nil
1152 : }
1153 :
1154 : // RangeKeyDeleteDeferred is similar to RangeKeyDelete in that it adds an
1155 : // operation to delete range keys to the batch, except it only takes in key
1156 : // lengths instead of complete slices, letting the caller encode into those
1157 : // objects and then call Finish() on the returned object. Note that
1158 : // DeferredBatchOp.Key should be populated with the start key, and
1159 : // DeferredBatchOp.Value should be populated with the end key.
1160 1 : func (b *Batch) RangeKeyDeleteDeferred(startLen, endLen int) *DeferredBatchOp {
1161 1 : b.prepareDeferredKeyValueRecord(startLen, endLen, InternalKeyKindRangeKeyDelete)
1162 1 : b.incrementRangeKeysCount()
1163 1 : return &b.deferredOp
1164 1 : }
1165 :
1166 : // LogData adds the specified data to the batch. The data will be written to the
1167 : // WAL, but not added to memtables or sstables. Log data is never indexed,
1168 : // which makes it useful for testing WAL performance.
1169 : //
1170 : // It is safe to modify the contents of the argument after LogData returns.
1171 1 : func (b *Batch) LogData(data []byte, _ *WriteOptions) error {
1172 1 : origCount, origMemTableSize := b.count, b.memTableSize
1173 1 : b.prepareDeferredKeyRecord(len(data), InternalKeyKindLogData)
1174 1 : copy(b.deferredOp.Key, data)
1175 1 : // Since LogData only writes to the WAL and does not affect the memtable, we
1176 1 : // restore b.count and b.memTableSize to their origin values. Note that
1177 1 : // Batch.count only refers to records that are added to the memtable.
1178 1 : b.count, b.memTableSize = origCount, origMemTableSize
1179 1 : return nil
1180 1 : }
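
// A minimal usage sketch: attaching opaque metadata to the WAL entry without
// affecting the memtable (the payload shown is illustrative):
//
//	_ = b.LogData([]byte(`{"txn":"abc123"}`), nil)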
1181 :
1182 : // IngestSST adds the FileNum for an sstable to the batch. The data will only be
1183 : // written to the WAL (not added to memtables or sstables).
1184 1 : func (b *Batch) ingestSST(fileNum base.FileNum) {
1185 1 : if b.Empty() {
1186 1 : b.ingestedSSTBatch = true
1187 1 : } else if !b.ingestedSSTBatch {
1188 0 : // Batch contains other key kinds.
1189 0 : panic("pebble: invalid call to ingestSST")
1190 : }
1191 :
1192 1 : origMemTableSize := b.memTableSize
1193 1 : var buf [binary.MaxVarintLen64]byte
1194 1 : length := binary.PutUvarint(buf[:], uint64(fileNum))
1195 1 : b.prepareDeferredKeyRecord(length, InternalKeyKindIngestSST)
1196 1 : copy(b.deferredOp.Key, buf[:length])
1197 1 : // Since IngestSST writes only to the WAL and does not affect the memtable,
1198 1 : // we restore b.memTableSize to its original value. Note that Batch.count
1199 1 : // is not reset because for the InternalKeyKindIngestSST the count is the
1200 1 : // number of sstable paths which have been added to the batch.
1201 1 : b.memTableSize = origMemTableSize
1202 1 : b.minimumFormatMajorVersion = FormatFlushableIngest
1203 : }
1204 :
1205 : // excise adds the excise span for a flushable ingest containing an excise. The data
1206 : // will only be written to the WAL (not added to memtables or sstables).
1207 1 : func (b *Batch) excise(start, end []byte) {
1208 1 : if b.Empty() {
1209 1 : b.ingestedSSTBatch = true
1210 1 : } else if !b.ingestedSSTBatch {
1211 0 : // Batch contains other key kinds.
1212 0 : panic("pebble: invalid call to excise")
1213 : }
1214 :
1215 1 : origMemTableSize := b.memTableSize
1216 1 : b.prepareDeferredKeyValueRecord(len(start), len(end), InternalKeyKindExcise)
1217 1 : copy(b.deferredOp.Key, start)
1218 1 : copy(b.deferredOp.Value, end)
1219 1 : // Since excise writes only to the WAL and does not affect the memtable,
1220 1 : // we restore b.memTableSize to its original value. Note that Batch.count
1221 1 : // is not reset because for the InternalKeyKindIngestSST/Excise the count
1222 1 : // is the number of sstable paths which have been added to the batch.
1223 1 : b.memTableSize = origMemTableSize
1224 1 : b.minimumFormatMajorVersion = FormatFlushableIngestExcises
1225 : }
1226 :
1227 : // Empty returns true if the batch is empty, and false otherwise.
1228 1 : func (b *Batch) Empty() bool {
1229 1 : return batchrepr.IsEmpty(b.data)
1230 1 : }
1231 :
1232 : // Len returns the current size of the batch in bytes.
1233 1 : func (b *Batch) Len() int {
1234 1 : return max(batchrepr.HeaderLen, len(b.data))
1235 1 : }
1236 :
1237 : // Repr returns the underlying batch representation. It is not safe to modify
1238 : // the contents. Reset() will not change the contents of the returned value,
1239 : // though any other mutation operation may do so.
1240 1 : func (b *Batch) Repr() []byte {
1241 1 : if len(b.data) == 0 {
1242 1 : b.init(batchrepr.HeaderLen)
1243 1 : }
1244 1 : batchrepr.SetCount(b.data, b.Count())
1245 1 : return b.data
1246 : }
1247 :
1248 : // SetRepr sets the underlying batch representation. The batch takes ownership
1249 : // of the supplied slice. It is not safe to modify it afterwards until the
1250 : // Batch is no longer in use.
1251 : //
1252 : // SetRepr may return ErrInvalidBatch if the supplied slice fails to decode in
1253 : // any way. It will not return an error in any other circumstance.
1254 1 : func (b *Batch) SetRepr(data []byte) error {
1255 1 : h, ok := batchrepr.ReadHeader(data)
1256 1 : if !ok {
1257 0 : return ErrInvalidBatch
1258 0 : }
1259 1 : b.data = data
1260 1 : b.count = uint64(h.Count)
1261 1 : var err error
1262 1 : if b.db != nil {
1263 1 : // Only track memTableSize for batches that will be committed to the DB.
1264 1 : err = b.refreshMemTableSize()
1265 1 : }
1266 1 : return err
1267 : }
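// Illustrative, caller-side sketch (not part of this file): Repr/SetRepr can be
// used to serialize a batch's mutations and replay them elsewhere. db and the
// keys below are hypothetical.
//
//	src := db.NewBatch()
//	_ = src.Set([]byte("a"), []byte("1"), nil)
//	_ = src.Delete([]byte("b"), nil)
//	wire := append([]byte(nil), src.Repr()...) // copy: SetRepr takes ownership of its argument
//	_ = src.Close()
//
//	dst := db.NewBatch()
//	if err := dst.SetRepr(wire); err != nil {
//		// ErrInvalidBatch: the slice failed to decode.
//	}
//	_ = dst.Commit(pebble.Sync)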
1268 :
1269 : // NewIter returns an iterator that is unpositioned (Iterator.Valid() will
1270 : // return false). The iterator can be positioned via a call to SeekGE,
1271 : // SeekPrefixGE, SeekLT, First or Last. Only indexed batches support iterators.
1272 : //
1273 : // The returned Iterator observes all of the Batch's existing mutations, but no
1274 : // later mutations. Its view can be refreshed via RefreshBatchSnapshot or
1275 : // SetOptions().
1276 1 : func (b *Batch) NewIter(o *IterOptions) (*Iterator, error) {
1277 1 : return b.NewIterWithContext(context.Background(), o)
1278 1 : }
1279 :
1280 : // NewIterWithContext is like NewIter, and additionally accepts a context for
1281 : // tracing.
1282 1 : func (b *Batch) NewIterWithContext(ctx context.Context, o *IterOptions) (*Iterator, error) {
1283 1 : if b.index == nil {
1284 0 : return nil, ErrNotIndexed
1285 0 : }
1286 1 : return b.db.newIter(ctx, b, newIterOpts{}, o), nil
1287 : }
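// Illustrative, caller-side sketch (not part of this file): only an indexed
// batch supports NewIter; a plain batch returns ErrNotIndexed. db is a
// hypothetical *pebble.DB.
//
//	b := db.NewIndexedBatch()
//	_ = b.Set([]byte("k1"), []byte("v1"), nil)
//	it, err := b.NewIter(&pebble.IterOptions{LowerBound: []byte("k")})
//	if err != nil {
//		// handle error
//	}
//	for it.First(); it.Valid(); it.Next() {
//		// Observes committed DB state plus the batch's own mutations.
//		_ = it.Key()
//	}
//	_ = it.Close()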
1288 :
1289 : // NewBatchOnlyIter constructs an iterator that only reads the contents of the
1290 : // batch, and does not overlay the batch mutations on top of the DB state.
1291 : //
1292 : // The returned Iterator observes all of the Batch's existing mutations, but
1293 : // no later mutations. Its view can be refreshed via RefreshBatchSnapshot or
1294 : // SetOptions().
1295 1 : func (b *Batch) NewBatchOnlyIter(ctx context.Context, o *IterOptions) (*Iterator, error) {
1296 1 : if b.index == nil {
1297 0 : return nil, ErrNotIndexed
1298 0 : }
1299 1 : return b.db.newIter(ctx, b, newIterOpts{batch: batchIterOpts{batchOnly: true}}, o), nil
1300 : }
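// Illustrative, caller-side sketch (not part of this file): NewBatchOnlyIter
// surfaces only the batch's own mutations, without overlaying them on the DB.
//
//	b := db.NewIndexedBatch()
//	_ = b.Set([]byte("only-in-batch"), []byte("v"), nil)
//	it, _ := b.NewBatchOnlyIter(context.Background(), nil)
//	for it.First(); it.Valid(); it.Next() {
//		// Visits "only-in-batch"; committed DB keys are not visible here.
//	}
//	_ = it.Close()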
1301 :
1302 : // newInternalIter creates a new internalIterator that iterates over the
1303 : // contents of the batch.
1304 1 : func (b *Batch) newInternalIter(o *IterOptions) *batchIter {
1305 1 : iter := &batchIter{}
1306 1 : b.initInternalIter(o, iter)
1307 1 : return iter
1308 1 : }
1309 :
1310 1 : func (b *Batch) initInternalIter(o *IterOptions, iter *batchIter) {
1311 1 : *iter = batchIter{
1312 1 : batch: b,
1313 1 : iter: b.index.NewIter(o.GetLowerBound(), o.GetUpperBound()),
1314 1 : // NB: We explicitly do not propagate the batch snapshot to the point
1315 1 : // key iterator. Filtering point keys within the batch iterator can
1316 1 : // cause pathological behavior where a batch iterator advances
1317 1 : // significantly farther than necessary filtering many batch keys that
1318 1 : // are not visible at the batch sequence number. Instead, the merging
1319 1 : // iterator enforces bounds.
1320 1 : //
1321 1 : // For example, consider an engine that contains the committed keys
1322 1 : // 'bar' and 'bax', with no keys between them. Consider a batch
1323 1 : // containing 1,000 keys within the range [a,z]. All of the
1324 1 : // batch keys were added to the batch after the iterator was
1325 1 : // constructed, so they are not visible to the iterator. A call to
1326 1 : // SeekGE('bax') would seek the LSM iterators and discover the key
1327 1 : // 'bax'. It would also seek the batch iterator, landing on the key
1328 1 : // 'baz' but discover that it's not visible. The batch iterator would
1329 1 : // next through the rest of the batch's keys, only to discover there are
1330 1 : // no visible keys greater than or equal to 'bax'.
1331 1 : //
1332 1 : // Filtering these batch points within the merging iterator ensures that
1333 1 : // the batch iterator never needs to iterate beyond 'baz', because it
1334 1 : // already found a smaller, visible key 'bax'.
1335 1 : snapshot: base.SeqNumMax,
1336 1 : }
1337 1 : }
1338 :
1339 1 : func (b *Batch) newRangeDelIter(o *IterOptions, batchSnapshot base.SeqNum) *keyspan.Iter {
1340 1 : // Construct an iterator even if rangeDelIndex is nil, because the iterator
1341 1 : // may be refreshed later, so we need the container to exist.
1342 1 : iter := new(keyspan.Iter)
1343 1 : b.initRangeDelIter(o, iter, batchSnapshot)
1344 1 : return iter
1345 1 : }
1346 :
1347 1 : func (b *Batch) initRangeDelIter(_ *IterOptions, iter *keyspan.Iter, batchSnapshot base.SeqNum) {
1348 1 : if b.rangeDelIndex == nil {
1349 1 : iter.Init(b.comparer.Compare, nil)
1350 1 : return
1351 1 : }
1352 :
1353 : // Fragment the range tombstones the first time a range deletion iterator is
1354 : // requested. The cached tombstones are invalidated if another range
1355 : // deletion tombstone is added to the batch. This cache is only guaranteed
1356 : // to be correct if we're opening an iterator to read at a batch sequence
1357 : // number at least as high as tombstonesSeqNum. The cache is guaranteed to
1358 : // include all tombstones up to tombstonesSeqNum, and if any additional
1359 : // tombstones were added after that sequence number the cache would've been
1360 : // cleared.
1361 1 : nextSeqNum := b.nextSeqNum()
1362 1 : if b.tombstones != nil && b.tombstonesSeqNum <= batchSnapshot {
1363 1 : iter.Init(b.comparer.Compare, b.tombstones)
1364 1 : return
1365 1 : }
1366 :
1367 1 : tombstones := make([]keyspan.Span, 0, b.countRangeDels)
1368 1 : frag := &keyspan.Fragmenter{
1369 1 : Cmp: b.comparer.Compare,
1370 1 : Format: b.comparer.FormatKey,
1371 1 : Emit: func(s keyspan.Span) {
1372 1 : tombstones = append(tombstones, s)
1373 1 : },
1374 : }
1375 1 : it := &batchIter{
1376 1 : batch: b,
1377 1 : iter: b.rangeDelIndex.NewIter(nil, nil),
1378 1 : snapshot: batchSnapshot,
1379 1 : }
1380 1 : fragmentRangeDels(frag, it, int(b.countRangeDels))
1381 1 : iter.Init(b.comparer.Compare, tombstones)
1382 1 :
1383 1 : // If we just read all the tombstones in the batch (eg, batchSnapshot was
1384 1 : // set to b.nextSeqNum()), then cache the tombstones so that a subsequent
1385 1 : // call to initRangeDelIter may use them without refragmenting.
1386 1 : if nextSeqNum == batchSnapshot {
1387 1 : b.tombstones = tombstones
1388 1 : b.tombstonesSeqNum = nextSeqNum
1389 1 : }
1390 : }
1391 :
1392 1 : func fragmentRangeDels(frag *keyspan.Fragmenter, it internalIterator, count int) {
1393 1 : // The memory management here is a bit subtle. The keys and values returned
1394 1 : // by the iterator are slices in Batch.data. Thus the fragmented tombstones
1395 1 : // are slices within Batch.data. If additional entries are added to the
1396 1 : // Batch, Batch.data may be reallocated. The references in the fragmented
1397 1 : // tombstones will remain valid, pointing into the old Batch.data. GC for
1398 1 : // the win.
1399 1 :
1400 1 : // Use a single []keyspan.Key buffer to avoid allocating many
1401 1 : // individual []keyspan.Key slices with a single element each.
1402 1 : keyBuf := make([]keyspan.Key, 0, count)
1403 1 : for kv := it.First(); kv != nil; kv = it.Next() {
1404 1 : s := rangedel.Decode(kv.K, kv.InPlaceValue(), keyBuf)
1405 1 : keyBuf = s.Keys[len(s.Keys):]
1406 1 :
1407 1 : // Set a fixed capacity to avoid accidental overwriting.
1408 1 : s.Keys = s.Keys[:len(s.Keys):len(s.Keys)]
1409 1 : frag.Add(s)
1410 1 : }
1411 1 : frag.Finish()
1412 : }
1413 :
1414 1 : func (b *Batch) newRangeKeyIter(o *IterOptions, batchSnapshot base.SeqNum) *keyspan.Iter {
1415 1 : // Construct an iterator even if rangeKeyIndex is nil, because the iterator
1416 1 : // may be refreshed later, so we need the container to exist.
1417 1 : iter := new(keyspan.Iter)
1418 1 : b.initRangeKeyIter(o, iter, batchSnapshot)
1419 1 : return iter
1420 1 : }
1421 :
1422 1 : func (b *Batch) initRangeKeyIter(_ *IterOptions, iter *keyspan.Iter, batchSnapshot base.SeqNum) {
1423 1 : if b.rangeKeyIndex == nil {
1424 1 : iter.Init(b.comparer.Compare, nil)
1425 1 : return
1426 1 : }
1427 :
1428 : // Fragment the range keys the first time a range key iterator is requested.
1429 : // The cached spans are invalidated if another range key is added to the
1430 : // batch. This cache is only guaranteed to be correct if we're opening an
1431 : // iterator to read at a batch sequence number at least as high as
1432 : // rangeKeysSeqNum. The cache is guaranteed to include all range keys up to
1433 : // rangeKeysSeqNum, and if any additional range keys were added after that
1434 : // sequence number the cache would've been cleared.
1435 1 : nextSeqNum := b.nextSeqNum()
1436 1 : if b.rangeKeys != nil && b.rangeKeysSeqNum <= batchSnapshot {
1437 1 : iter.Init(b.comparer.Compare, b.rangeKeys)
1438 1 : return
1439 1 : }
1440 :
1441 1 : rangeKeys := make([]keyspan.Span, 0, b.countRangeKeys)
1442 1 : frag := &keyspan.Fragmenter{
1443 1 : Cmp: b.comparer.Compare,
1444 1 : Format: b.comparer.FormatKey,
1445 1 : Emit: func(s keyspan.Span) {
1446 1 : rangeKeys = append(rangeKeys, s)
1447 1 : },
1448 : }
1449 1 : it := &batchIter{
1450 1 : batch: b,
1451 1 : iter: b.rangeKeyIndex.NewIter(nil, nil),
1452 1 : snapshot: batchSnapshot,
1453 1 : }
1454 1 : fragmentRangeKeys(frag, it, int(b.countRangeKeys))
1455 1 : iter.Init(b.comparer.Compare, rangeKeys)
1456 1 :
1457 1 : // If we just read all the range keys in the batch (eg, batchSnapshot was
1458 1 : // set to b.nextSeqNum()), then cache the range keys so that a subsequent
1459 1 : // call to initRangeKeyIter may use them without refragmenting.
1460 1 : if nextSeqNum == batchSnapshot {
1461 1 : b.rangeKeys = rangeKeys
1462 1 : b.rangeKeysSeqNum = nextSeqNum
1463 1 : }
1464 : }
1465 :
1466 1 : func fragmentRangeKeys(frag *keyspan.Fragmenter, it internalIterator, count int) error {
1467 1 : // The memory management here is a bit subtle. The keys and values
1468 1 : // returned by the iterator are slices in Batch.data. Thus the
1469 1 : // fragmented key spans are slices within Batch.data. If additional
1470 1 : // entries are added to the Batch, Batch.data may be reallocated. The
1471 1 : // references in the fragmented keys will remain valid, pointing into
1472 1 : // the old Batch.data. GC for the win.
1473 1 :
1474 1 : // Use a single []keyspan.Key buffer to avoid allocating many
1475 1 : // individual []keyspan.Key slices with a single element each.
1476 1 : keyBuf := make([]keyspan.Key, 0, count)
1477 1 : for kv := it.First(); kv != nil; kv = it.Next() {
1478 1 : s, err := rangekey.Decode(kv.K, kv.InPlaceValue(), keyBuf)
1479 1 : if err != nil {
1480 0 : return err
1481 0 : }
1482 1 : keyBuf = s.Keys[len(s.Keys):]
1483 1 :
1484 1 : // Set a fixed capacity to avoid accidental overwriting.
1485 1 : s.Keys = s.Keys[:len(s.Keys):len(s.Keys)]
1486 1 : frag.Add(s)
1487 : }
1488 1 : frag.Finish()
1489 1 : return nil
1490 : }
1491 :
1492 : // Commit applies the batch to its parent writer.
1493 1 : func (b *Batch) Commit(o *WriteOptions) error {
1494 1 : return b.db.Apply(b, o)
1495 1 : }
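// Illustrative, caller-side sketch (not part of this file): Commit is shorthand
// for handing the batch to DB.Apply. pebble.Sync/pebble.NoSync select whether
// the commit waits for the WAL to be synced.
//
//	b := db.NewBatch()
//	_ = b.Set([]byte("k"), []byte("v"), nil)
//	if err := b.Commit(pebble.Sync); err != nil {
//		// handle commit failure
//	}
//	_ = b.Close()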
1496 :
1497 : // Close closes the batch without committing it.
1498 1 : func (b *Batch) Close() error {
1499 1 : // The storage engine commit pipeline may retain a pointer to b.data beyond
1500 1 : // when Commit() returns. This is possible when configured for WAL failover;
1501 1 : // we don't know if we might need to read the batch data again until the
1502 1 : // batch has been durably synced [even if the committer doesn't care to wait
1503 1 : // for the sync and Sync()=false].
1504 1 : //
1505 1 : // We still want to recycle these batches. The b.lifecycle atomic negotiates
1506 1 : // the batch's lifecycle. If the commit pipeline still might read b.data,
1507 1 : // b.lifecycle will be nonzero [the low bits hold a ref count].
1508 1 : for {
1509 1 : v := b.lifecycle.Load()
1510 1 : switch {
1511 1 : case v == 0:
1512 1 : // A zero value indicates that the commit pipeline has no
1513 1 : // outstanding references to the batch. The commit pipeline is
1514 1 : // required to acquire a ref synchronously, so there is no risk that
1515 1 : // the commit pipeline will grab a ref after the call to release. We
1516 1 : // can simply release the batch.
1517 1 : b.release()
1518 1 : return nil
1519 1 : case (v & batchClosedBit) != 0:
1520 1 : // The batchClosedBit is set: this batch has already been closed.
1521 1 : return ErrClosed
1522 1 : default:
1523 1 : // There's an outstanding reference. Set the batch released bit so
1524 1 : // that the commit pipeline knows it should release the batch when
1525 1 : // it unrefs.
1526 1 : if b.lifecycle.CompareAndSwap(v, v|batchClosedBit) {
1527 1 : return nil
1528 1 : }
1529 : // The CAS failed, which indicates the outstanding reference just
1530 : // decremented (or the caller illegally closed the batch twice).
1531 : // Loop to reload.
1532 : }
1533 : }
1534 : }
1535 :
1536 : // Indexed returns true if the batch is indexed (i.e. supports read
1537 : // operations).
1538 1 : func (b *Batch) Indexed() bool {
1539 1 : return b.index != nil
1540 1 : }
1541 :
1542 : // init ensures that the batch data slice is initialized to meet the
1543 : // minimum required size and allocates space for the batch header.
1544 1 : func (b *Batch) init(size int) {
1545 1 : b.opts.ensureDefaults()
1546 1 : n := b.opts.initialSizeBytes
1547 1 : for n < size {
1548 1 : n *= 2
1549 1 : }
1550 1 : if cap(b.data) < n {
1551 1 : b.data = rawalloc.New(batchrepr.HeaderLen, n)
1552 1 : }
1553 1 : b.data = b.data[:batchrepr.HeaderLen]
1554 1 : clear(b.data) // Zero the sequence number in the header
1555 : }
1556 :
1557 : // Reset resets the batch for reuse. The underlying byte slice (that is
1558 : // returned by Repr()) may not be modified. It is only necessary to call this
1559 : // method if a batch is explicitly being reused. Close automatically takes care
1560 : // of releasing resources when appropriate for batches that are internally
1561 : // being reused.
1562 1 : func (b *Batch) Reset() {
1563 1 : // In some configurations (WAL failover) the commit pipeline may retain
1564 1 : // b.data beyond a call to commit the batch. When this happens, b.lifecycle
1565 1 : // is nonzero (see the comment above b.lifecycle). In this case it's unsafe
1566 1 : // to mutate b.data, so we discard it. Note that Reset must not be called on
1567 1 : // a closed batch, so v > 0 implies a non-zero ref count and not
1568 1 : // batchClosedBit being set.
1569 1 : if v := b.lifecycle.Load(); v > 0 {
1570 1 : b.data = nil
1571 1 : }
1572 1 : b.reset()
1573 : }
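// Illustrative, caller-side sketch (not part of this file): Reset allows a
// batch (and, up to maxRetainedSizeBytes, its buffer) to be reused across
// commits instead of allocating a new batch each time.
//
//	b := db.NewBatch()
//	defer b.Close()
//	for _, k := range [][]byte{[]byte("a"), []byte("b")} {
//		_ = b.Set(k, []byte("v"), nil)
//		_ = b.Commit(pebble.NoSync)
//		b.Reset() // ready for the next round of mutations
//	}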
1574 :
1575 1 : func (b *Batch) reset() {
1576 1 : // Zero out the struct, retaining only the fields necessary for manual
1577 1 : // reuse.
1578 1 : b.batchInternal = batchInternal{
1579 1 : data: b.data,
1580 1 : comparer: b.comparer,
1581 1 : opts: b.opts,
1582 1 : index: b.index,
1583 1 : db: b.db,
1584 1 : }
1585 1 : b.applied.Store(false)
1586 1 : if b.data != nil {
1587 1 : if cap(b.data) > b.opts.maxRetainedSizeBytes {
1588 1 : // If the capacity of the buffer is larger than our maximum
1589 1 : // retention size, don't re-use it. Let it be GC-ed instead.
1590 1 : // This prevents the memory from an unusually large batch from
1591 1 : // being held on to indefinitely.
1592 1 : b.data = nil
1593 1 : } else {
1594 1 : // Otherwise, reset the buffer for re-use.
1595 1 : b.data = b.data[:batchrepr.HeaderLen]
1596 1 : clear(b.data)
1597 1 : }
1598 : }
1599 1 : if b.index != nil {
1600 1 : b.index.Init(&b.data, b.comparer.Compare, b.comparer.AbbreviatedKey)
1601 1 : }
1602 : }
1603 :
1604 1 : func (b *Batch) grow(n int) {
1605 1 : newSize := len(b.data) + n
1606 1 : if uint64(newSize) >= maxBatchSize {
1607 1 : panic(ErrBatchTooLarge)
1608 : }
1609 1 : if newSize > cap(b.data) {
1610 1 : newCap := 2 * cap(b.data)
1611 1 : for newCap < newSize {
1612 1 : newCap *= 2
1613 1 : }
1614 1 : newData := rawalloc.New(len(b.data), newCap)
1615 1 : copy(newData, b.data)
1616 1 : b.data = newData
1617 : }
1618 1 : b.data = b.data[:newSize]
1619 : }
1620 :
1621 1 : func (b *Batch) setSeqNum(seqNum base.SeqNum) {
1622 1 : batchrepr.SetSeqNum(b.data, seqNum)
1623 1 : }
1624 :
1625 : // SeqNum returns the batch sequence number which is applied to the first
1626 : // record in the batch. The sequence number is incremented for each subsequent
1627 : // record. It returns zero if the batch is empty.
1628 1 : func (b *Batch) SeqNum() base.SeqNum {
1629 1 : if len(b.data) == 0 {
1630 1 : b.init(batchrepr.HeaderLen)
1631 1 : }
1632 1 : return batchrepr.ReadSeqNum(b.data)
1633 : }
1634 :
1635 1 : func (b *Batch) setCount(v uint32) {
1636 1 : b.count = uint64(v)
1637 1 : }
1638 :
1639 : // Count returns the count of memtable-modifying operations in this batch. All
1640 : // operations with the exception of LogData increment this count. For IngestSSTs,
1641 : // count is only used to indicate the number of SSTs ingested in the record; the
1642 : // batch isn't applied to the memtable.
1643 1 : func (b *Batch) Count() uint32 {
1644 1 : if b.count > math.MaxUint32 {
1645 1 : panic(batchrepr.ErrInvalidBatch)
1646 : }
1647 1 : return uint32(b.count)
1648 : }
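// Illustrative, caller-side sketch (not part of this file): Count reflects only
// memtable-modifying operations; LogData records are excluded.
//
//	b := db.NewBatch()
//	_ = b.Set([]byte("a"), []byte("1"), nil)
//	_ = b.Delete([]byte("b"), nil)
//	_ = b.LogData([]byte("audit"), nil)
//	n := b.Count() // 2: the Set and the Delete
//	_ = n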
1649 :
1650 : // Reader returns a batchrepr.Reader for the current batch contents. If the
1651 : // batch is mutated, the new entries will not be visible to the reader.
1652 1 : func (b *Batch) Reader() batchrepr.Reader {
1653 1 : if len(b.data) == 0 {
1654 1 : b.init(batchrepr.HeaderLen)
1655 1 : }
1656 1 : return batchrepr.Read(b.data)
1657 : }
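// Illustrative sketch (not part of this file): walking the batch contents via a
// batchrepr.Reader, assuming Reader.Next has the (kind, userKey, value, ok, err)
// shape used by newFlushableBatch below.
//
//	r := b.Reader()
//	for {
//		kind, ukey, _, ok, err := r.Next()
//		if err != nil || !ok {
//			break
//		}
//		fmt.Printf("%s %q\n", kind, ukey)
//	}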
1658 :
1659 : // SyncWait is to be used in conjunction with DB.ApplyNoSyncWait.
1660 1 : func (b *Batch) SyncWait() error {
1661 1 : now := crtime.NowMono()
1662 1 : b.fsyncWait.Wait()
1663 1 : if b.commitErr != nil {
1664 0 : b.db = nil // prevent batch reuse on error
1665 0 : }
1666 1 : waitDuration := now.Elapsed()
1667 1 : b.commitStats.CommitWaitDuration += waitDuration
1668 1 : b.commitStats.TotalDuration += waitDuration
1669 1 : return b.commitErr
1670 : }
1671 :
1672 : // CommitStats returns stats related to committing the batch. Should be called
1673 : // after Batch.Commit or DB.Apply. If DB.ApplyNoSyncWait is used, it should be
1674 : // called after Batch.SyncWait.
1675 1 : func (b *Batch) CommitStats() BatchCommitStats {
1676 1 : return b.commitStats
1677 1 : }
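// Illustrative, caller-side sketch (not part of this file): the
// ApplyNoSyncWait/SyncWait split lets a caller overlap other work with the WAL
// sync, then read the commit stats once the sync completes.
//
//	b := db.NewBatch()
//	_ = b.Set([]byte("k"), []byte("v"), nil)
//	if err := db.ApplyNoSyncWait(b, pebble.Sync); err != nil {
//		// apply failed
//	}
//	// ... other work, overlapping with the fsync ...
//	if err := b.SyncWait(); err != nil {
//		// the sync failed
//	}
//	stats := b.CommitStats() // includes the time spent waiting in SyncWait
//	_ = stats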
1678 :
1679 : // Note: batchIter mirrors the implementation of flushableBatchIter. Keep the
1680 : // two in sync.
1681 : type batchIter struct {
1682 : batch *Batch
1683 : iter batchskl.Iterator
1684 : kv base.InternalKV
1685 : err error
1686 : // snapshot holds a batch "sequence number" at which the batch is being
1687 : // read. This sequence number has the InternalKeySeqNumBatch bit set, so it
1688 : // encodes an offset within the batch. Only batch entries earlier than the
1689 : // offset are visible during iteration.
1690 : snapshot base.SeqNum
1691 : }
1692 :
1693 : // batchIter implements the base.InternalIterator interface.
1694 : var _ base.InternalIterator = (*batchIter)(nil)
1695 :
1696 0 : func (i *batchIter) String() string {
1697 0 : return "batch"
1698 0 : }
1699 :
1700 1 : func (i *batchIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV {
1701 1 : // Ignore TrySeekUsingNext if the view of the batch changed.
1702 1 : if flags.TrySeekUsingNext() && flags.BatchJustRefreshed() {
1703 1 : flags = flags.DisableTrySeekUsingNext()
1704 1 : }
1705 :
1706 1 : i.err = nil // clear cached iteration error
1707 1 : ikey := i.iter.SeekGE(key, flags)
1708 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1709 0 : ikey = i.iter.Next()
1710 0 : }
1711 1 : if ikey == nil {
1712 1 : i.kv = base.InternalKV{}
1713 1 : return nil
1714 1 : }
1715 1 : i.kv.K = *ikey
1716 1 : i.kv.V = base.MakeInPlaceValue(i.value())
1717 1 : return &i.kv
1718 : }
1719 :
1720 1 : func (i *batchIter) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV {
1721 1 : kv := i.SeekGE(key, flags)
1722 1 : if kv == nil {
1723 1 : return nil
1724 1 : }
1725 : // If the key doesn't have the sought prefix, return nil.
1726 1 : if !bytes.Equal(i.batch.comparer.Split.Prefix(kv.K.UserKey), prefix) {
1727 1 : i.kv = base.InternalKV{}
1728 1 : return nil
1729 1 : }
1730 1 : return kv
1731 : }
1732 :
1733 1 : func (i *batchIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV {
1734 1 : i.err = nil // clear cached iteration error
1735 1 : ikey := i.iter.SeekLT(key)
1736 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1737 0 : ikey = i.iter.Prev()
1738 0 : }
1739 1 : if ikey == nil {
1740 1 : i.kv = base.InternalKV{}
1741 1 : return nil
1742 1 : }
1743 1 : i.kv.K = *ikey
1744 1 : i.kv.V = base.MakeInPlaceValue(i.value())
1745 1 : return &i.kv
1746 : }
1747 :
1748 1 : func (i *batchIter) First() *base.InternalKV {
1749 1 : i.err = nil // clear cached iteration error
1750 1 : ikey := i.iter.First()
1751 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1752 1 : ikey = i.iter.Next()
1753 1 : }
1754 1 : if ikey == nil {
1755 1 : i.kv = base.InternalKV{}
1756 1 : return nil
1757 1 : }
1758 1 : i.kv.K = *ikey
1759 1 : i.kv.V = base.MakeInPlaceValue(i.value())
1760 1 : return &i.kv
1761 : }
1762 :
1763 1 : func (i *batchIter) Last() *base.InternalKV {
1764 1 : i.err = nil // clear cached iteration error
1765 1 : ikey := i.iter.Last()
1766 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1767 0 : ikey = i.iter.Prev()
1768 0 : }
1769 1 : if ikey == nil {
1770 0 : i.kv = base.InternalKV{}
1771 0 : return nil
1772 0 : }
1773 1 : i.kv.K = *ikey
1774 1 : i.kv.V = base.MakeInPlaceValue(i.value())
1775 1 : return &i.kv
1776 : }
1777 :
1778 1 : func (i *batchIter) Next() *base.InternalKV {
1779 1 : ikey := i.iter.Next()
1780 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1781 1 : ikey = i.iter.Next()
1782 1 : }
1783 1 : if ikey == nil {
1784 1 : i.kv = base.InternalKV{}
1785 1 : return nil
1786 1 : }
1787 1 : i.kv.K = *ikey
1788 1 : i.kv.V = base.MakeInPlaceValue(i.value())
1789 1 : return &i.kv
1790 : }
1791 :
1792 0 : func (i *batchIter) NextPrefix(succKey []byte) *base.InternalKV {
1793 0 : // Because NextPrefix was invoked, `succKey` must be ≥ the key at i's current
1794 0 : // position. Seek the arena iterator using TrySeekUsingNext.
1795 0 : ikey := i.iter.SeekGE(succKey, base.SeekGEFlagsNone.EnableTrySeekUsingNext())
1796 0 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1797 0 : ikey = i.iter.Next()
1798 0 : }
1799 0 : if ikey == nil {
1800 0 : i.kv = base.InternalKV{}
1801 0 : return nil
1802 0 : }
1803 0 : i.kv.K = *ikey
1804 0 : i.kv.V = base.MakeInPlaceValue(i.value())
1805 0 : return &i.kv
1806 : }
1807 :
1808 1 : func (i *batchIter) Prev() *base.InternalKV {
1809 1 : ikey := i.iter.Prev()
1810 1 : for ikey != nil && ikey.SeqNum() >= i.snapshot {
1811 0 : ikey = i.iter.Prev()
1812 0 : }
1813 1 : if ikey == nil {
1814 1 : i.kv = base.InternalKV{}
1815 1 : return nil
1816 1 : }
1817 1 : i.kv.K = *ikey
1818 1 : i.kv.V = base.MakeInPlaceValue(i.value())
1819 1 : return &i.kv
1820 : }
1821 :
1822 1 : func (i *batchIter) value() []byte {
1823 1 : offset, _, keyEnd := i.iter.KeyInfo()
1824 1 : data := i.batch.data
1825 1 : if len(data[offset:]) == 0 {
1826 0 : i.err = base.CorruptionErrorf("corrupted batch")
1827 0 : return nil
1828 0 : }
1829 :
1830 1 : switch InternalKeyKind(data[offset]) {
1831 : case InternalKeyKindSet, InternalKeyKindMerge, InternalKeyKindRangeDelete,
1832 : InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete,
1833 1 : InternalKeyKindDeleteSized:
1834 1 : _, value, ok := batchrepr.DecodeStr(data[keyEnd:])
1835 1 : if !ok {
1836 0 : return nil
1837 0 : }
1838 1 : return value
1839 1 : default:
1840 1 : return nil
1841 : }
1842 : }
1843 :
1844 1 : func (i *batchIter) Error() error {
1845 1 : return i.err
1846 1 : }
1847 :
1848 1 : func (i *batchIter) Close() error {
1849 1 : _ = i.iter.Close()
1850 1 : return i.err
1851 1 : }
1852 :
1853 1 : func (i *batchIter) SetBounds(lower, upper []byte) {
1854 1 : i.iter.SetBounds(lower, upper)
1855 1 : }
1856 :
1857 0 : func (i *batchIter) SetContext(_ context.Context) {}
1858 :
1859 : // DebugTree is part of the InternalIterator interface.
1860 0 : func (i *batchIter) DebugTree(tp treeprinter.Node) {
1861 0 : tp.Childf("%T(%p)", i, i)
1862 0 : }
1863 :
1864 : type flushableBatchEntry struct {
1865 : // offset is the byte offset of the record within the batch repr.
1866 : offset uint32
1867 : // index is the 0-based ordinal number of the record within the batch. Used
1868 : // to compute the seqnum for the record.
1869 : index uint32
1870 : // key{Start,End} are the start and end byte offsets of the key within the
1871 : // batch repr. Cached to avoid decoding the key length on every
1872 : // comparison. The value is stored starting at keyEnd.
1873 : keyStart uint32
1874 : keyEnd uint32
1875 : }
1876 :
1877 : // flushableBatch wraps an existing batch and provides the interfaces needed
1878 : // for making the batch flushable (i.e. able to mimic a memtable).
1879 : type flushableBatch struct {
1880 : cmp Compare
1881 : comparer *base.Comparer
1882 : data []byte
1883 :
1884 : // The base sequence number for the entries in the batch. This is the same
1885 : // value as Batch.seqNum() and is cached here for performance.
1886 : seqNum base.SeqNum
1887 :
1888 : // A slice of offsets and indices for the entries in the batch. Used to
1889 : // implement flushableBatchIter. Unlike the indexing on a normal batch, a
1890 : // flushable batch is indexed such that batch entry i will be given the
1891 : // sequence number flushableBatch.seqNum+i.
1892 : //
1893 : // Sorted in increasing order of key and decreasing order of offset (since
1894 : // higher offsets correspond to higher sequence numbers).
1895 : //
1896 : // Does not include range deletion entries or range key entries.
1897 : offsets []flushableBatchEntry
1898 :
1899 : // Fragmented range deletion tombstones.
1900 : tombstones []keyspan.Span
1901 :
1902 : // Fragmented range keys.
1903 : rangeKeys []keyspan.Span
1904 : }
1905 :
1906 : var _ flushable = (*flushableBatch)(nil)
1907 :
1908 : // newFlushableBatch creates a new batch that implements the flushable
1909 : // interface. This allows the batch to act like a memtable and be placed in the
1910 : // queue of flushable memtables. Note that the flushable batch takes ownership
1911 : // of the batch data.
1912 1 : func newFlushableBatch(batch *Batch, comparer *Comparer) (*flushableBatch, error) {
1913 1 : b := &flushableBatch{
1914 1 : data: batch.data,
1915 1 : cmp: comparer.Compare,
1916 1 : comparer: comparer,
1917 1 : offsets: make([]flushableBatchEntry, 0, batch.Count()),
1918 1 : }
1919 1 : if b.data != nil {
1920 1 : // Note that this sequence number is not correct when this batch has not
1921 1 : // been applied since the sequence number has not been assigned yet. The
1922 1 : // correct sequence number will be set later. But it is correct when the
1923 1 : // batch is being replayed from the WAL.
1924 1 : b.seqNum = batch.SeqNum()
1925 1 : }
1926 1 : var rangeDelOffsets []flushableBatchEntry
1927 1 : var rangeKeyOffsets []flushableBatchEntry
1928 1 : if len(b.data) > batchrepr.HeaderLen {
1929 1 : // Non-empty batch.
1930 1 : var index uint32
1931 1 : for iter := batchrepr.Read(b.data); len(iter) > 0; {
1932 1 : offset := uintptr(unsafe.Pointer(&iter[0])) - uintptr(unsafe.Pointer(&b.data[0]))
1933 1 : kind, key, _, ok, err := iter.Next()
1934 1 : if !ok {
1935 0 : if err != nil {
1936 0 : return nil, err
1937 0 : }
1938 0 : break
1939 : }
1940 1 : entry := flushableBatchEntry{
1941 1 : offset: uint32(offset),
1942 1 : index: uint32(index),
1943 1 : }
1944 1 : if keySize := uint32(len(key)); keySize == 0 {
1945 1 : // Must add 2 to the offset. One byte encodes `kind` and the next
1946 1 : // byte encodes `0`, which is the length of the key.
1947 1 : entry.keyStart = uint32(offset) + 2
1948 1 : entry.keyEnd = entry.keyStart
1949 1 : } else {
1950 1 : entry.keyStart = uint32(uintptr(unsafe.Pointer(&key[0])) -
1951 1 : uintptr(unsafe.Pointer(&b.data[0])))
1952 1 : entry.keyEnd = entry.keyStart + keySize
1953 1 : }
1954 1 : switch kind {
1955 1 : case InternalKeyKindRangeDelete:
1956 1 : rangeDelOffsets = append(rangeDelOffsets, entry)
1957 1 : case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
1958 1 : rangeKeyOffsets = append(rangeKeyOffsets, entry)
1959 1 : case InternalKeyKindLogData:
1960 1 : // Skip it; we never want to iterate over LogDatas.
1961 1 : continue
1962 : case InternalKeyKindSet, InternalKeyKindDelete, InternalKeyKindMerge,
1963 1 : InternalKeyKindSingleDelete, InternalKeyKindSetWithDelete, InternalKeyKindDeleteSized:
1964 1 : b.offsets = append(b.offsets, entry)
1965 0 : default:
1966 0 : // Note: In some circumstances this might be temporary memory
1967 0 : // corruption that can be recovered by discarding the batch and
1968 0 : // trying again. In other cases, the batch repr might've been
1969 0 : // already persisted elsewhere, and we'll loop continuously trying
1970 0 : // to commit the same corrupted batch. The caller is responsible for
1971 0 : // distinguishing.
1972 0 : return nil, errors.Wrapf(ErrInvalidBatch, "unrecognized kind %v", kind)
1973 : }
1974 : // NB: index (used for entry.index above) must not reach the
1975 : // batch.count, because the index is used in conjunction with the
1976 : // batch's sequence number to assign sequence numbers to keys within
1977 : // the batch. If we assign a KV an index as high as batch.count,
1978 : // we'll begin assigning keys sequence numbers that weren't
1979 : // allocated.
1980 1 : if index >= uint32(batch.count) {
1981 0 : return nil, base.AssertionFailedf("pebble: batch entry index %d ≥ batch.count %d", index, batch.count)
1982 0 : }
1983 1 : index++
1984 : }
1985 : }
1986 :
1987 : // Sort all of offsets, rangeDelOffsets and rangeKeyOffsets, using *batch's
1988 : // sort.Interface implementation.
1989 1 : pointOffsets := b.offsets
1990 1 : sort.Sort(b)
1991 1 : b.offsets = rangeDelOffsets
1992 1 : sort.Sort(b)
1993 1 : b.offsets = rangeKeyOffsets
1994 1 : sort.Sort(b)
1995 1 : b.offsets = pointOffsets
1996 1 :
1997 1 : if len(rangeDelOffsets) > 0 {
1998 1 : frag := &keyspan.Fragmenter{
1999 1 : Cmp: b.cmp,
2000 1 : Format: b.comparer.FormatKey,
2001 1 : Emit: func(s keyspan.Span) {
2002 1 : b.tombstones = append(b.tombstones, s)
2003 1 : },
2004 : }
2005 1 : it := &flushableBatchIter{
2006 1 : batch: b,
2007 1 : data: b.data,
2008 1 : offsets: rangeDelOffsets,
2009 1 : cmp: b.cmp,
2010 1 : index: -1,
2011 1 : }
2012 1 : fragmentRangeDels(frag, it, len(rangeDelOffsets))
2013 : }
2014 1 : if len(rangeKeyOffsets) > 0 {
2015 1 : frag := &keyspan.Fragmenter{
2016 1 : Cmp: b.cmp,
2017 1 : Format: b.comparer.FormatKey,
2018 1 : Emit: func(s keyspan.Span) {
2019 1 : b.rangeKeys = append(b.rangeKeys, s)
2020 1 : },
2021 : }
2022 1 : it := &flushableBatchIter{
2023 1 : batch: b,
2024 1 : data: b.data,
2025 1 : offsets: rangeKeyOffsets,
2026 1 : cmp: b.cmp,
2027 1 : index: -1,
2028 1 : }
2029 1 : fragmentRangeKeys(frag, it, len(rangeKeyOffsets))
2030 : }
2031 1 : return b, nil
2032 : }
2033 :
2034 1 : func (b *flushableBatch) setSeqNum(seqNum base.SeqNum) {
2035 1 : if b.seqNum != 0 {
2036 0 : panic(fmt.Sprintf("pebble: flushableBatch.seqNum already set: %d", b.seqNum))
2037 : }
2038 1 : b.seqNum = seqNum
2039 1 : for i := range b.tombstones {
2040 1 : for j := range b.tombstones[i].Keys {
2041 1 : b.tombstones[i].Keys[j].Trailer = base.MakeTrailer(
2042 1 : b.tombstones[i].Keys[j].SeqNum()+seqNum,
2043 1 : b.tombstones[i].Keys[j].Kind(),
2044 1 : )
2045 1 : }
2046 : }
2047 1 : for i := range b.rangeKeys {
2048 1 : for j := range b.rangeKeys[i].Keys {
2049 1 : b.rangeKeys[i].Keys[j].Trailer = base.MakeTrailer(
2050 1 : b.rangeKeys[i].Keys[j].SeqNum()+seqNum,
2051 1 : b.rangeKeys[i].Keys[j].Kind(),
2052 1 : )
2053 1 : }
2054 : }
2055 : }
2056 :
2057 1 : func (b *flushableBatch) Len() int {
2058 1 : return len(b.offsets)
2059 1 : }
2060 :
2061 1 : func (b *flushableBatch) Less(i, j int) bool {
2062 1 : ei := &b.offsets[i]
2063 1 : ej := &b.offsets[j]
2064 1 : ki := b.data[ei.keyStart:ei.keyEnd]
2065 1 : kj := b.data[ej.keyStart:ej.keyEnd]
2066 1 : switch c := b.cmp(ki, kj); {
2067 1 : case c < 0:
2068 1 : return true
2069 1 : case c > 0:
2070 1 : return false
2071 1 : default:
2072 1 : return ei.offset > ej.offset
2073 : }
2074 : }
2075 :
2076 1 : func (b *flushableBatch) Swap(i, j int) {
2077 1 : b.offsets[i], b.offsets[j] = b.offsets[j], b.offsets[i]
2078 1 : }
2079 :
2080 : // newIter is part of the flushable interface.
2081 1 : func (b *flushableBatch) newIter(o *IterOptions) internalIterator {
2082 1 : return &flushableBatchIter{
2083 1 : batch: b,
2084 1 : data: b.data,
2085 1 : offsets: b.offsets,
2086 1 : cmp: b.cmp,
2087 1 : index: -1,
2088 1 : lower: o.GetLowerBound(),
2089 1 : upper: o.GetUpperBound(),
2090 1 : }
2091 1 : }
2092 :
2093 : // newFlushIter is part of the flushable interface.
2094 1 : func (b *flushableBatch) newFlushIter(o *IterOptions) internalIterator {
2095 1 : return &flushFlushableBatchIter{
2096 1 : flushableBatchIter: flushableBatchIter{
2097 1 : batch: b,
2098 1 : data: b.data,
2099 1 : offsets: b.offsets,
2100 1 : cmp: b.cmp,
2101 1 : index: -1,
2102 1 : },
2103 1 : }
2104 1 : }
2105 :
2106 : // newRangeDelIter is part of the flushable interface.
2107 1 : func (b *flushableBatch) newRangeDelIter(o *IterOptions) keyspan.FragmentIterator {
2108 1 : if len(b.tombstones) == 0 {
2109 1 : return nil
2110 1 : }
2111 1 : return keyspan.NewIter(b.cmp, b.tombstones)
2112 : }
2113 :
2114 : // newRangeKeyIter is part of the flushable interface.
2115 1 : func (b *flushableBatch) newRangeKeyIter(o *IterOptions) keyspan.FragmentIterator {
2116 1 : if len(b.rangeKeys) == 0 {
2117 1 : return nil
2118 1 : }
2119 1 : return keyspan.NewIter(b.cmp, b.rangeKeys)
2120 : }
2121 :
2122 : // containsRangeKeys is part of the flushable interface.
2123 1 : func (b *flushableBatch) containsRangeKeys() bool { return len(b.rangeKeys) > 0 }
2124 :
2125 : // inuseBytes is part of the flushable interface.
2126 1 : func (b *flushableBatch) inuseBytes() uint64 {
2127 1 : return uint64(len(b.data) - batchrepr.HeaderLen)
2128 1 : }
2129 :
2130 : // totalBytes is part of the flushable interface.
2131 1 : func (b *flushableBatch) totalBytes() uint64 {
2132 1 : return uint64(cap(b.data))
2133 1 : }
2134 :
2135 : // readyForFlush is part of the flushable interface.
2136 1 : func (b *flushableBatch) readyForFlush() bool {
2137 1 : // A flushable batch is always ready for flush; it must be flushed together
2138 1 : // with the previous memtable.
2139 1 : return true
2140 1 : }
2141 :
2142 : // computePossibleOverlaps is part of the flushable interface.
2143 : func (b *flushableBatch) computePossibleOverlaps(
2144 : fn func(bounded) shouldContinue, bounded ...bounded,
2145 1 : ) {
2146 1 : computePossibleOverlapsGenericImpl[*flushableBatch](b, b.cmp, fn, bounded)
2147 1 : }
2148 :
2149 : // Note: flushableBatchIter mirrors the implementation of batchIter. Keep the
2150 : // two in sync.
2151 : type flushableBatchIter struct {
2152 : // Members to be initialized by creator.
2153 : batch *flushableBatch
2154 : // The bytes backing the batch. Always the same as batch.data?
2155 : data []byte
2156 : // The sorted entries. This is not always equal to batch.offsets.
2157 : offsets []flushableBatchEntry
2158 : cmp Compare
2159 : // Must be initialized to -1. It is the index into offsets that represents
2160 : // the current iterator position.
2161 : index int
2162 :
2163 : // For internal use by the implementation.
2164 : kv base.InternalKV
2165 : err error
2166 :
2167 : // Optionally initialize to bounds of iteration, if any.
2168 : lower []byte
2169 : upper []byte
2170 : }
2171 :
2172 : // flushableBatchIter implements the base.InternalIterator interface.
2173 : var _ base.InternalIterator = (*flushableBatchIter)(nil)
2174 :
2175 0 : func (i *flushableBatchIter) String() string {
2176 0 : return "flushable-batch"
2177 0 : }
2178 :
2179 : // SeekGE implements internalIterator.SeekGE, as documented in the pebble
2180 : // package. Ignore flags.TrySeekUsingNext() since we don't expect this
2181 : // optimization to provide much benefit here at the moment.
2182 1 : func (i *flushableBatchIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV {
2183 1 : i.err = nil // clear cached iteration error
2184 1 : ikey := base.MakeSearchKey(key)
2185 1 : i.index = sort.Search(len(i.offsets), func(j int) bool {
2186 1 : return base.InternalCompare(i.cmp, ikey, i.getKey(j)) <= 0
2187 1 : })
2188 1 : if i.index >= len(i.offsets) {
2189 1 : return nil
2190 1 : }
2191 1 : kv := i.getKV(i.index)
2192 1 : if i.upper != nil && i.cmp(kv.K.UserKey, i.upper) >= 0 {
2193 1 : i.index = len(i.offsets)
2194 1 : return nil
2195 1 : }
2196 1 : return kv
2197 : }
2198 :
2199 : // SeekPrefixGE implements internalIterator.SeekPrefixGE, as documented in the
2200 : // pebble package.
2201 : func (i *flushableBatchIter) SeekPrefixGE(
2202 : prefix, key []byte, flags base.SeekGEFlags,
2203 1 : ) *base.InternalKV {
2204 1 : kv := i.SeekGE(key, flags)
2205 1 : if kv == nil {
2206 1 : return nil
2207 1 : }
2208 : // If the key doesn't have the sought prefix, return nil.
2209 1 : if !bytes.Equal(i.batch.comparer.Split.Prefix(kv.K.UserKey), prefix) {
2210 1 : return nil
2211 1 : }
2212 1 : return kv
2213 : }
2214 :
2215 : // SeekLT implements internalIterator.SeekLT, as documented in the pebble
2216 : // package.
2217 1 : func (i *flushableBatchIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV {
2218 1 : i.err = nil // clear cached iteration error
2219 1 : ikey := base.MakeSearchKey(key)
2220 1 : i.index = sort.Search(len(i.offsets), func(j int) bool {
2221 1 : return base.InternalCompare(i.cmp, ikey, i.getKey(j)) <= 0
2222 1 : })
2223 1 : i.index--
2224 1 : if i.index < 0 {
2225 1 : return nil
2226 1 : }
2227 1 : kv := i.getKV(i.index)
2228 1 : if i.lower != nil && i.cmp(kv.K.UserKey, i.lower) < 0 {
2229 1 : i.index = -1
2230 1 : return nil
2231 1 : }
2232 1 : return kv
2233 : }
2234 :
2235 : // First implements internalIterator.First, as documented in the pebble
2236 : // package.
2237 1 : func (i *flushableBatchIter) First() *base.InternalKV {
2238 1 : i.err = nil // clear cached iteration error
2239 1 : if len(i.offsets) == 0 {
2240 1 : return nil
2241 1 : }
2242 1 : i.index = 0
2243 1 : kv := i.getKV(i.index)
2244 1 : if i.upper != nil && i.cmp(kv.K.UserKey, i.upper) >= 0 {
2245 1 : i.index = len(i.offsets)
2246 1 : return nil
2247 1 : }
2248 1 : return kv
2249 : }
2250 :
2251 : // Last implements internalIterator.Last, as documented in the pebble
2252 : // package.
2253 1 : func (i *flushableBatchIter) Last() *base.InternalKV {
2254 1 : i.err = nil // clear cached iteration error
2255 1 : if len(i.offsets) == 0 {
2256 0 : return nil
2257 0 : }
2258 1 : i.index = len(i.offsets) - 1
2259 1 : kv := i.getKV(i.index)
2260 1 : if i.lower != nil && i.cmp(kv.K.UserKey, i.lower) < 0 {
2261 1 : i.index = -1
2262 1 : return nil
2263 1 : }
2264 1 : return kv
2265 : }
2266 :
2267 : // Note: flushFlushableBatchIter.Next mirrors the implementation of
2268 : // flushableBatchIter.Next due to performance. Keep the two in sync.
2269 1 : func (i *flushableBatchIter) Next() *base.InternalKV {
2270 1 : if i.index == len(i.offsets) {
2271 0 : return nil
2272 0 : }
2273 1 : i.index++
2274 1 : if i.index == len(i.offsets) {
2275 1 : return nil
2276 1 : }
2277 1 : kv := i.getKV(i.index)
2278 1 : if i.upper != nil && i.cmp(kv.K.UserKey, i.upper) >= 0 {
2279 1 : i.index = len(i.offsets)
2280 1 : return nil
2281 1 : }
2282 1 : return kv
2283 : }
2284 :
2285 1 : func (i *flushableBatchIter) Prev() *base.InternalKV {
2286 1 : if i.index < 0 {
2287 0 : return nil
2288 0 : }
2289 1 : i.index--
2290 1 : if i.index < 0 {
2291 1 : return nil
2292 1 : }
2293 1 : kv := i.getKV(i.index)
2294 1 : if i.lower != nil && i.cmp(kv.K.UserKey, i.lower) < 0 {
2295 1 : i.index = -1
2296 1 : return nil
2297 1 : }
2298 1 : return kv
2299 : }
2300 :
2301 : // Note: flushFlushableBatchIter.NextPrefix mirrors the implementation of
2302 : // flushableBatchIter.NextPrefix due to performance. Keep the two in sync.
2303 0 : func (i *flushableBatchIter) NextPrefix(succKey []byte) *base.InternalKV {
2304 0 : return i.SeekGE(succKey, base.SeekGEFlagsNone.EnableTrySeekUsingNext())
2305 0 : }
2306 :
2307 1 : func (i *flushableBatchIter) getKey(index int) InternalKey {
2308 1 : e := &i.offsets[index]
2309 1 : kind := InternalKeyKind(i.data[e.offset])
2310 1 : key := i.data[e.keyStart:e.keyEnd]
2311 1 : return base.MakeInternalKey(key, i.batch.seqNum+base.SeqNum(e.index), kind)
2312 1 : }
2313 :
2314 1 : func (i *flushableBatchIter) getKV(index int) *base.InternalKV {
2315 1 : i.kv = base.InternalKV{
2316 1 : K: i.getKey(index),
2317 1 : V: base.MakeInPlaceValue(i.extractValue()),
2318 1 : }
2319 1 : return &i.kv
2320 1 : }
2321 :
2322 1 : func (i *flushableBatchIter) extractValue() []byte {
2323 1 : p := i.data[i.offsets[i.index].offset:]
2324 1 : if len(p) == 0 {
2325 0 : i.err = base.CorruptionErrorf("corrupted batch")
2326 0 : return nil
2327 0 : }
2328 1 : kind := InternalKeyKind(p[0])
2329 1 : if kind > InternalKeyKindMax {
2330 0 : i.err = base.CorruptionErrorf("corrupted batch")
2331 0 : return nil
2332 0 : }
2333 1 : var value []byte
2334 1 : var ok bool
2335 1 : switch kind {
2336 : case InternalKeyKindSet, InternalKeyKindMerge, InternalKeyKindRangeDelete,
2337 : InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete,
2338 1 : InternalKeyKindDeleteSized:
2339 1 : keyEnd := i.offsets[i.index].keyEnd
2340 1 : _, value, ok = batchrepr.DecodeStr(i.data[keyEnd:])
2341 1 : if !ok {
2342 0 : i.err = base.CorruptionErrorf("corrupted batch")
2343 0 : return nil
2344 0 : }
2345 : }
2346 1 : return value
2347 : }
2348 :
2349 0 : func (i *flushableBatchIter) Valid() bool {
2350 0 : return i.index >= 0 && i.index < len(i.offsets)
2351 0 : }
2352 :
2353 1 : func (i *flushableBatchIter) Error() error {
2354 1 : return i.err
2355 1 : }
2356 :
2357 1 : func (i *flushableBatchIter) Close() error {
2358 1 : return i.err
2359 1 : }
2360 :
2361 1 : func (i *flushableBatchIter) SetBounds(lower, upper []byte) {
2362 1 : i.lower = lower
2363 1 : i.upper = upper
2364 1 : }
2365 :
2366 0 : func (i *flushableBatchIter) SetContext(_ context.Context) {}
2367 :
2368 : // DebugTree is part of the InternalIterator interface.
2369 0 : func (i *flushableBatchIter) DebugTree(tp treeprinter.Node) {
2370 0 : tp.Childf("%T(%p)", i, i)
2371 0 : }
2372 :
2373 : // flushFlushableBatchIter is a variant of flushableBatchIter used when flushing
2374 : // a batch; it supports only forward iteration (First and Next).
2375 : type flushFlushableBatchIter struct {
2376 : flushableBatchIter
2377 : }
2378 :
2379 : // flushFlushableBatchIter implements the base.InternalIterator interface.
2380 : var _ base.InternalIterator = (*flushFlushableBatchIter)(nil)
2381 :
2382 0 : func (i *flushFlushableBatchIter) String() string {
2383 0 : return "flushable-batch"
2384 0 : }
2385 :
2386 0 : func (i *flushFlushableBatchIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV {
2387 0 : panic("pebble: SeekGE unimplemented")
2388 : }
2389 :
2390 : func (i *flushFlushableBatchIter) SeekPrefixGE(
2391 : prefix, key []byte, flags base.SeekGEFlags,
2392 0 : ) *base.InternalKV {
2393 0 : panic("pebble: SeekPrefixGE unimplemented")
2394 : }
2395 :
2396 0 : func (i *flushFlushableBatchIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV {
2397 0 : panic("pebble: SeekLT unimplemented")
2398 : }
2399 :
2400 1 : func (i *flushFlushableBatchIter) First() *base.InternalKV {
2401 1 : i.err = nil // clear cached iteration error
2402 1 : return i.flushableBatchIter.First()
2403 1 : }
2404 :
2405 0 : func (i *flushFlushableBatchIter) NextPrefix(succKey []byte) *base.InternalKV {
2406 0 : panic("pebble: Prev unimplemented")
2407 : }
2408 :
2409 : // Note: flushFlushableBatchIter.Next mirrors the implementation of
2410 : // flushableBatchIter.Next due to performance. Keep the two in sync.
2411 1 : func (i *flushFlushableBatchIter) Next() *base.InternalKV {
2412 1 : if i.index == len(i.offsets) {
2413 0 : return nil
2414 0 : }
2415 1 : i.index++
2416 1 : if i.index == len(i.offsets) {
2417 1 : return nil
2418 1 : }
2419 1 : return i.getKV(i.index)
2420 : }
2421 :
2422 0 : func (i flushFlushableBatchIter) Prev() *base.InternalKV {
2423 0 : panic("pebble: Prev unimplemented")
2424 : }
2425 :
2426 : // batchOptions holds the parameters to configure batch.
2427 : type batchOptions struct {
2428 : initialSizeBytes int
2429 : maxRetainedSizeBytes int
2430 : }
2431 :
2432 : // ensureDefaults fills in default values for any batch options that are unset.
2433 1 : func (o *batchOptions) ensureDefaults() {
2434 1 : if o.initialSizeBytes <= 0 {
2435 1 : o.initialSizeBytes = defaultBatchInitialSize
2436 1 : }
2437 1 : if o.maxRetainedSizeBytes <= 0 {
2438 1 : o.maxRetainedSizeBytes = defaultBatchMaxRetainedSize
2439 1 : }
2440 : }
2441 :
2442 : // BatchOption allows customizing the batch.
2443 : type BatchOption func(*batchOptions)
2444 :
2445 : // WithInitialSizeBytes sets a custom initial size for the batch. Defaults
2446 : // to 1KB.
2447 1 : func WithInitialSizeBytes(s int) BatchOption {
2448 1 : return func(opts *batchOptions) {
2449 1 : opts.initialSizeBytes = s
2450 1 : }
2451 : }
2452 :
2453 : // WithMaxRetainedSizeBytes sets a custom max size for the batch to be
2454 : // re-used. Any batch buffer which exceeds the max retained size will be GC-ed.
2455 : // Defaults to 1MB.
2456 1 : func WithMaxRetainedSizeBytes(s int) BatchOption {
2457 1 : return func(opts *batchOptions) {
2458 1 : opts.maxRetainedSizeBytes = s
2459 1 : }
2460 : }
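// Illustrative, caller-side sketch (not part of this file): supplying batch
// options at construction time; this assumes DB.NewBatch accepts BatchOption
// values.
//
//	b := db.NewBatch(
//		pebble.WithInitialSizeBytes(4<<10),       // start with a 4 KB buffer
//		pebble.WithMaxRetainedSizeBytes(512<<10), // don't recycle buffers larger than 512 KB
//	)
//	defer b.Close()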
2461 :
2462 : // batchSort returns iterators for the sorted contents of the batch. It is
2463 : // intended for testing use only. The private.BatchSort indirection is done to prevent
2464 : // exposing this method in the public pebble interface.
2465 : func batchSort(
2466 : i interface{},
2467 : ) (
2468 : points internalIterator,
2469 : rangeDels keyspan.FragmentIterator,
2470 : rangeKeys keyspan.FragmentIterator,
2471 1 : ) {
2472 1 : b := i.(*Batch)
2473 1 : if b.Indexed() {
2474 1 : pointIter := b.newInternalIter(nil)
2475 1 : rangeDelIter := b.newRangeDelIter(nil, math.MaxUint64)
2476 1 : rangeKeyIter := b.newRangeKeyIter(nil, math.MaxUint64)
2477 1 : return pointIter, rangeDelIter, rangeKeyIter
2478 1 : }
2479 1 : f, err := newFlushableBatch(b, b.db.opts.Comparer)
2480 1 : if err != nil {
2481 0 : panic(err)
2482 : }
2483 1 : return f.newIter(nil), f.newRangeDelIter(nil), f.newRangeKeyIter(nil)
2484 : }
2485 :
2486 1 : func init() {
2487 1 : private.BatchSort = batchSort
2488 1 : }
|