Line data Source code
1 : // Copyright 2012 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : // Package pebble provides an ordered key/value store.
6 : package pebble // import "github.com/cockroachdb/pebble"
7 :
8 : import (
9 : "context"
10 : "fmt"
11 : "io"
12 : "os"
13 : "strconv"
14 : "sync"
15 : "sync/atomic"
16 : "time"
17 :
18 : "github.com/cockroachdb/errors"
19 : "github.com/cockroachdb/pebble/internal/arenaskl"
20 : "github.com/cockroachdb/pebble/internal/base"
21 : "github.com/cockroachdb/pebble/internal/invalidating"
22 : "github.com/cockroachdb/pebble/internal/invariants"
23 : "github.com/cockroachdb/pebble/internal/keyspan"
24 : "github.com/cockroachdb/pebble/internal/manifest"
25 : "github.com/cockroachdb/pebble/internal/manual"
26 : "github.com/cockroachdb/pebble/objstorage"
27 : "github.com/cockroachdb/pebble/objstorage/remote"
28 : "github.com/cockroachdb/pebble/rangekey"
29 : "github.com/cockroachdb/pebble/record"
30 : "github.com/cockroachdb/pebble/sstable"
31 : "github.com/cockroachdb/pebble/vfs"
32 : "github.com/cockroachdb/pebble/vfs/atomicfs"
33 : "github.com/cockroachdb/tokenbucket"
34 : "github.com/prometheus/client_golang/prometheus"
35 : )
36 :
37 : const (
38 : // minTableCacheSize is the minimum size of the table cache, for a single db.
39 : minTableCacheSize = 64
40 :
41 : // numNonTableCacheFiles is an approximation of the number of files, per
42 : // DB, that are used outside of the table cache.
43 : numNonTableCacheFiles = 10
44 : )
45 :
46 : var (
47 : // ErrNotFound is returned when a get operation does not find the requested
48 : // key.
49 : ErrNotFound = base.ErrNotFound
50 : // ErrClosed is the error used in panics when an operation is performed on a
51 : // closed snapshot or DB. Use errors.Is(err, ErrClosed) to check for this error.
52 : ErrClosed = errors.New("pebble: closed")
53 : // ErrReadOnly is returned when a write operation is performed on a read-only
54 : // database.
55 : ErrReadOnly = errors.New("pebble: read-only")
56 : // errNoSplit indicates that the user is trying to perform a range key
57 : // operation but the configured Comparer does not provide a Split
58 : // implementation.
59 : errNoSplit = errors.New("pebble: Comparer.Split required for range key operations")
60 : )
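// isClosedErr is an illustrative sketch (not part of the original file)
// showing the recommended errors.Is check for ErrClosed described above,
// which works even when the error has been wrapped.
func isClosedErr(err error) bool {
	return errors.Is(err, ErrClosed)
}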
61 :
62 : // Reader is a readable key/value store.
63 : //
64 : // It is safe to call Get and NewIter from concurrent goroutines.
65 : type Reader interface {
66 : // Get gets the value for the given key. It returns ErrNotFound if the DB
67 : // does not contain the key.
68 : //
69 : // The caller should not modify the contents of the returned slice, but it is
70 : // safe to modify the contents of the argument after Get returns. The
71 : // returned slice will remain valid until the returned Closer is closed. On
72 : // success, the caller MUST call closer.Close() or a memory leak will occur.
73 : Get(key []byte) (value []byte, closer io.Closer, err error)
74 :
75 : // NewIter returns an iterator that is unpositioned (Iterator.Valid() will
76 : // return false). The iterator can be positioned via a call to SeekGE,
77 : // SeekLT, First or Last.
78 : NewIter(o *IterOptions) (*Iterator, error)
79 :
80 : // Close closes the Reader. It may or may not close any underlying io.Reader
81 : // or io.Writer, depending on how the DB was created.
82 : //
83 : // It is not safe to close a DB until all outstanding iterators are closed.
84 : // It is valid to call Close multiple times. Other methods should not be
85 : // called after the DB has been closed.
86 : Close() error
87 : }
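// readerExample is an illustrative sketch (not part of the original file)
// of the Reader contract above: a successful Get requires closer.Close(),
// ErrNotFound is an ignorable condition, and a new iterator starts
// unpositioned. The function name and signature are hypothetical.
func readerExample(r Reader, key []byte) error {
	value, closer, err := r.Get(key)
	switch {
	case err == nil:
		_ = value // use value before Close; the slice is invalid afterwards
		if err := closer.Close(); err != nil {
			return err
		}
	case errors.Is(err, ErrNotFound):
		// The key is absent; callers are free to treat this as non-fatal.
	default:
		return err
	}
	iter, err := r.NewIter(nil)
	if err != nil {
		return err
	}
	for iter.First(); iter.Valid(); iter.Next() {
		// iter.Key()/iter.Value() are valid until the next positioning call.
	}
	return iter.Close()
}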
88 :
89 : // Writer is a writable key/value store.
90 : //
91 : // Goroutine safety is dependent on the specific implementation.
92 : type Writer interface {
93 : // Apply the operations contained in the batch to the DB.
94 : //
95 : // It is safe to modify the contents of the arguments after Apply returns.
96 : Apply(batch *Batch, o *WriteOptions) error
97 :
98 : // Delete deletes the value for the given key. Deletes are blind and will
99 : // succeed even if the given key does not exist.
100 : //
101 : // It is safe to modify the contents of the arguments after Delete returns.
102 : Delete(key []byte, o *WriteOptions) error
103 :
104 : // DeleteSized behaves identically to Delete, but takes an additional
105 : // argument indicating the size of the value being deleted. DeleteSized
106 : // should be preferred when the caller has the expectation that there exists
107 : // a single internal KV pair for the key (eg, the key has not been
108 : // overwritten recently), and the caller knows the size of its value.
109 : //
110 : // DeleteSized will record the value size within the tombstone and use it to
111 : // inform compaction-picking heuristics which strive to reduce space
112 : // amplification in the LSM. This "calling your shot" mechanic allows the
113 : // storage engine to more accurately estimate and reduce space
114 : // amplification.
115 : //
116 : // It is safe to modify the contents of the arguments after DeleteSized
117 : // returns.
118 : DeleteSized(key []byte, valueSize uint32, _ *WriteOptions) error
119 :
120 : // SingleDelete is similar to Delete in that it deletes the value for the given key. Like Delete,
121 : // it is a blind operation that will succeed even if the given key does not exist.
122 : //
123 : // WARNING: Undefined (non-deterministic) behavior will result if a key is overwritten and
124 : // then deleted using SingleDelete. The record may appear deleted immediately, but be
125 : // resurrected at a later time after compactions have been performed. Or the record may
126 : // be deleted permanently. A Delete operation lays down a "tombstone" which shadows all
127 : // previous versions of a key. The SingleDelete operation is akin to "anti-matter" and will
128 : // only delete the most recently written version for a key. These different semantics allow
129 : // the DB to avoid propagating a SingleDelete operation during a compaction as soon as the
130 : // corresponding Set operation is encountered. These semantics require extreme care to handle
131 : // properly. Only use if you have a workload where the performance gain is critical and you
132 : // can guarantee that a record is written once and then deleted once.
133 : //
134 : // SingleDelete is internally transformed into a Delete if the most recent record for a key is either
135 : // a Merge or Delete record.
136 : //
137 : // It is safe to modify the contents of the arguments after SingleDelete returns.
138 : SingleDelete(key []byte, o *WriteOptions) error
139 :
140 : // DeleteRange deletes all of the point keys (and values) in the range
141 : // [start,end) (inclusive on start, exclusive on end). DeleteRange does NOT
142 : // delete overlapping range keys (eg, keys set via RangeKeySet).
143 : //
144 : // It is safe to modify the contents of the arguments after DeleteRange
145 : // returns.
146 : DeleteRange(start, end []byte, o *WriteOptions) error
147 :
148 : // LogData adds the specified data to the batch. The data will be written to the
149 : // WAL, but not added to memtables or sstables. Log data is never indexed,
150 : // which makes it useful for testing WAL performance.
151 : //
152 : // It is safe to modify the contents of the argument after LogData returns.
153 : LogData(data []byte, opts *WriteOptions) error
154 :
155 : // Merge merges the value for the given key. The details of the merge are
156 : // dependent upon the configured merge operation.
157 : //
158 : // It is safe to modify the contents of the arguments after Merge returns.
159 : Merge(key, value []byte, o *WriteOptions) error
160 :
161 : // Set sets the value for the given key. It overwrites any previous value
162 : // for that key; a DB is not a multi-map.
163 : //
164 : // It is safe to modify the contents of the arguments after Set returns.
165 : Set(key, value []byte, o *WriteOptions) error
166 :
167 : // RangeKeySet sets a range key mapping the key range [start, end) at the MVCC
168 : // timestamp suffix to value. The suffix is optional. If any portion of the key
169 : // range [start, end) is already set by a range key with the same suffix value,
170 : // RangeKeySet overrides it.
171 : //
172 : // It is safe to modify the contents of the arguments after RangeKeySet returns.
173 : RangeKeySet(start, end, suffix, value []byte, opts *WriteOptions) error
174 :
175 : // RangeKeyUnset removes a range key mapping the key range [start, end) at the
176 : // MVCC timestamp suffix. The suffix may be omitted to remove an unsuffixed
177 : // range key. RangeKeyUnset only removes portions of range keys that fall within
178 : // the [start, end) key span, and only range keys with suffixes that exactly
179 : // match the unset suffix.
180 : //
181 : // It is safe to modify the contents of the arguments after RangeKeyUnset
182 : // returns.
183 : RangeKeyUnset(start, end, suffix []byte, opts *WriteOptions) error
184 :
185 : // RangeKeyDelete deletes all of the range keys in the range [start,end)
186 : // (inclusive on start, exclusive on end). It does not delete point keys (for
187 : // that use DeleteRange). RangeKeyDelete removes all range keys within the
188 : // bounds, including those with or without suffixes.
189 : //
190 : // It is safe to modify the contents of the arguments after RangeKeyDelete
191 : // returns.
192 : RangeKeyDelete(start, end []byte, opts *WriteOptions) error
193 : }
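// writerExample is an illustrative sketch (not part of the original file)
// of two contracts documented above: SingleDelete is only safe when a key
// is set exactly once and never overwritten, and DeleteSized "calls its
// shot" by recording the deleted value's size. Names are hypothetical.
func writerExample(w Writer, onceKey, sizedKey, value []byte) error {
	// onceKey is written exactly once, so SingleDelete is safe here.
	if err := w.Set(onceKey, value, nil); err != nil {
		return err
	}
	if err := w.SingleDelete(onceKey, nil); err != nil {
		return err
	}
	// sizedKey holds a value whose size the caller knows, letting the
	// tombstone inform compaction-picking heuristics.
	return w.DeleteSized(sizedKey, uint32(len(value)), nil)
}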
194 :
195 : // CPUWorkHandle represents a handle used by the CPUWorkPermissionGranter API.
196 : type CPUWorkHandle interface {
197 : // Permitted indicates whether Pebble can use additional CPU resources.
198 : Permitted() bool
199 : }
200 :
201 : // CPUWorkPermissionGranter is used to request permission to opportunistically
202 : // use additional CPUs to speed up internal background work.
203 : type CPUWorkPermissionGranter interface {
204 : // GetPermission returns a handle regardless of whether permission is granted
205 : // or not. In the latter case, the handle is only useful for recording
206 : // the CPU time actually spent on this calling goroutine.
207 : GetPermission(time.Duration) CPUWorkHandle
208 : // CPUWorkDone must be called regardless of whether CPUWorkHandle.Permitted
209 : // returns true or false.
210 : CPUWorkDone(CPUWorkHandle)
211 : }
212 :
213 : // Use a default implementation for the CPU work granter to avoid excessive nil
214 : // checks in the code.
215 : type defaultCPUWorkHandle struct{}
216 :
217 1 : func (d defaultCPUWorkHandle) Permitted() bool {
218 1 : return false
219 1 : }
220 :
221 : type defaultCPUWorkGranter struct{}
222 :
223 1 : func (d defaultCPUWorkGranter) GetPermission(_ time.Duration) CPUWorkHandle {
224 1 : return defaultCPUWorkHandle{}
225 1 : }
226 :
227 1 : func (d defaultCPUWorkGranter) CPUWorkDone(_ CPUWorkHandle) {}
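// granterExample is an illustrative sketch (not part of the original file)
// of the calling convention above: CPUWorkDone must be invoked for every
// handle returned by GetPermission, whether or not permission was granted.
func granterExample(g CPUWorkPermissionGranter) {
	handle := g.GetPermission(10 * time.Millisecond)
	defer g.CPUWorkDone(handle)
	if handle.Permitted() {
		// Opportunistically perform extra background work here.
	}
}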
228 :
229 : // DB provides a concurrent, persistent ordered key/value store.
230 : //
231 : // A DB's basic operations (Get, Set, Delete) should be self-explanatory. Get
232 : // and Delete will return ErrNotFound if the requested key is not in the store.
233 : // Callers are free to ignore this error.
234 : //
235 : // A DB also allows for iterating over the key/value pairs in key order. If d
236 : // is a DB, the code below prints all key/value pairs whose keys are 'greater
237 : // than or equal to' k:
238 : //
239 : // iter := d.NewIter(readOptions)
240 : // for iter.SeekGE(k); iter.Valid(); iter.Next() {
241 : // fmt.Printf("key=%q value=%q\n", iter.Key(), iter.Value())
242 : // }
243 : // return iter.Close()
244 : //
245 : // The Options struct holds the optional parameters for the DB, including a
246 : // Comparer to define a 'less than' relationship over keys. It is always valid
247 : // to pass a nil *Options, which means to use the default parameter values. Any
248 : // zero field of a non-nil *Options also means to use the default value for
249 : // that parameter. Thus, the code below uses a custom Comparer, but the default
250 : // values for every other parameter:
251 : //
252 : // db := pebble.Open(&Options{
253 : // Comparer: myComparer,
254 : // })
255 : type DB struct {
256 : // The count and size of referenced memtables. This includes memtables
257 : // present in DB.mu.mem.queue, as well as memtables that have been flushed
258 : // but are still referenced by an inuse readState, as well as up to one
259 : // memTable waiting to be reused and stored in d.memTableRecycle.
260 : memTableCount atomic.Int64
261 : memTableReserved atomic.Int64 // number of bytes reserved in the cache for memtables
262 : // memTableRecycle holds a pointer to an obsolete memtable. The next
263 : // memtable allocation will reuse this memtable if it has not already been
264 : // recycled.
265 : memTableRecycle atomic.Pointer[memTable]
266 :
267 : // The size of the current log file (i.e. db.mu.log.queue[len(queue)-1]).
268 : logSize atomic.Uint64
269 :
270 : // The number of bytes available on disk.
271 : diskAvailBytes atomic.Uint64
272 :
273 : cacheID uint64
274 : dirname string
275 : walDirname string
276 : opts *Options
277 : cmp Compare
278 : equal Equal
279 : merge Merge
280 : split Split
281 : abbreviatedKey AbbreviatedKey
282 : // The threshold for determining when a batch is "large" and will skip being
283 : // inserted into a memtable.
284 : largeBatchThreshold uint64
285 : // The current OPTIONS file number.
286 : optionsFileNum base.DiskFileNum
287 : // The on-disk size of the current OPTIONS file.
288 : optionsFileSize uint64
289 :
290 : // objProvider is used to access and manage SSTs.
291 : objProvider objstorage.Provider
292 :
293 : fileLock *Lock
294 : dataDir vfs.File
295 : walDir vfs.File
296 :
297 : tableCache *tableCacheContainer
298 : newIters tableNewIters
299 : tableNewRangeKeyIter keyspan.TableNewSpanIter
300 :
301 : commit *commitPipeline
302 :
303 : // readState provides access to the state needed for reading without needing
304 : // to acquire DB.mu.
305 : readState struct {
306 : sync.RWMutex
307 : val *readState
308 : }
309 : // logRecycler holds a set of log file numbers that are available for
310 : // reuse. Writing to a recycled log file is faster than to a new log file on
311 : // some common filesystems (xfs and ext3/4) due to avoiding metadata
312 : // updates.
313 : logRecycler logRecycler
314 :
315 : closed *atomic.Value
316 : closedCh chan struct{}
317 :
318 : cleanupManager *cleanupManager
319 : // testingAlwaysWaitForCleanup is set by some tests to force waiting for
320 : // obsolete file deletion (to make events deterministic).
321 : testingAlwaysWaitForCleanup bool
322 :
323 : // During an iterator close, we may asynchronously schedule read compactions.
324 : // We want to wait for those goroutines to finish, before closing the DB.
325 : // compactionSchedulers.Wait() should not be called while the DB.mu is held.
326 : compactionSchedulers sync.WaitGroup
327 :
328 : // The main mutex protecting internal DB state. This mutex encompasses many
329 : // fields because those fields need to be accessed and updated atomically. In
330 : // particular, the current version, log.*, mem.*, and snapshot list need to
331 : // be accessed and updated atomically during compaction.
332 : //
333 : // Care is taken to avoid holding DB.mu during IO operations. Accomplishing
334 : // this sometimes requires releasing DB.mu in a method that was called with
335 : // it held. See versionSet.logAndApply() and DB.makeRoomForWrite() for
336 : // examples. This is a common pattern, so be careful about expectations that
337 : // DB.mu will be held continuously across a set of calls.
338 : mu struct {
339 : sync.Mutex
340 :
341 : formatVers struct {
342 : // vers is the database's current format major version.
343 : // Backwards-incompatible features are gated behind new
344 : // format major versions and not enabled until a database's
345 : // version is ratcheted upwards.
346 : //
347 : // Although this is under the `mu` prefix, readers may read vers
348 : // atomically without holding d.mu. Writers must only write to this
349 : // value through finalizeFormatVersUpgrade which requires d.mu is
350 : // held.
351 : vers atomic.Uint64
352 : // marker is the atomic marker for the format major version.
353 : // When a database's version is ratcheted upwards, the
354 : // marker is moved in order to atomically record the new
355 : // version.
356 : marker *atomicfs.Marker
357 : // ratcheting when set to true indicates that the database is
358 : // currently in the process of ratcheting the format major version
359 : // to vers + 1. As a part of ratcheting the format major version,
360 : // migrations may drop and re-acquire the mutex.
361 : ratcheting bool
362 : }
363 :
364 : // The ID of the next job. Job IDs are passed to event listener
365 : // notifications and act as a mechanism for tying together the events and
366 : // log messages for a single job such as a flush, compaction, or file
367 : // ingestion. Job IDs are not serialized to disk or used for correctness.
368 : nextJobID int
369 :
370 : // The collection of immutable versions and state about the log and visible
371 : // sequence numbers. Use the pointer here to ensure the atomic fields in
372 : // version set are aligned properly.
373 : versions *versionSet
374 :
375 : log struct {
376 : // The queue of logs, containing both flushed and unflushed logs. The
377 : // flushed logs will be a prefix, the unflushed logs a suffix. The
378 : // delimiter between flushed and unflushed logs is
379 : // versionSet.minUnflushedLogNum.
380 : queue []fileInfo
381 : // The number of input bytes to the log. This is the raw size of the
382 : // batches written to the WAL, without the overhead of the record
383 : // envelopes.
384 : bytesIn uint64
385 : // The LogWriter is protected by commitPipeline.mu. This allows log
386 : // writes to be performed without holding DB.mu, but requires both
387 : // commitPipeline.mu and DB.mu to be held when rotating the WAL/memtable
388 : // (i.e. makeRoomForWrite).
389 : *record.LogWriter
390 : // Can be nil.
391 : metrics struct {
392 : fsyncLatency prometheus.Histogram
393 : record.LogWriterMetrics
394 : }
395 : registerLogWriterForTesting func(w *record.LogWriter)
396 : }
397 :
398 : mem struct {
399 : // The current mutable memTable.
400 : mutable *memTable
401 : // Queue of flushables (the mutable memtable is at end). Elements are
402 : // added to the end of the slice and removed from the beginning. Once an
403 : // index is set it is never modified making a fixed slice immutable and
404 : // safe for concurrent reads.
405 : queue flushableList
406 : // nextSize is the size of the next memtable. The memtable size starts at
407 : // min(256KB,Options.MemTableSize) and doubles each time a new memtable
408 : // is allocated up to Options.MemTableSize. This reduces the memory
409 : // footprint of memtables when lots of DB instances are used concurrently
410 : // in test environments.
411 : nextSize uint64
412 : }
413 :
414 : compact struct {
415 : // Condition variable used to signal when a flush or compaction has
416 : // completed. Used by the write-stall mechanism to wait for the stall
417 : // condition to clear. See DB.makeRoomForWrite().
418 : cond sync.Cond
419 : // True when a flush is in progress.
420 : flushing bool
421 : // The number of ongoing compactions.
422 : compactingCount int
423 : // The list of deletion hints, suggesting ranges for delete-only
424 : // compactions.
425 : deletionHints []deleteCompactionHint
426 : // The list of manual compactions. The next manual compaction to perform
427 : // is at the start of the list. New entries are added to the end.
428 : manual []*manualCompaction
429 : // inProgress is the set of in-progress flushes and compactions.
430 : // It's used in the calculation of some metrics and to initialize L0
431 : // sublevels' state. Some of the compactions contained within this
432 : // map may have already committed an edit to the version but are
433 : // lingering performing cleanup, like deleting obsolete files.
434 : inProgress map[*compaction]struct{}
435 :
436 : // rescheduleReadCompaction indicates to an iterator that a read compaction
437 : // should be scheduled.
438 : rescheduleReadCompaction bool
439 :
440 : // readCompactions is a readCompactionQueue which keeps track of the
441 : // compactions which we might have to perform.
442 : readCompactions readCompactionQueue
443 :
444 : // The cumulative duration of all completed compactions since Open.
445 : // Does not include flushes.
446 : duration time.Duration
447 : // Flush throughput metric.
448 : flushWriteThroughput ThroughputMetric
449 : // The idle start time for the flush "loop", i.e., when the flushing
450 : // bool above transitions to false.
451 : noOngoingFlushStartTime time.Time
452 : }
453 :
454 : // Non-zero when file cleaning is disabled. The disabled count acts as a
455 : // reference count to prohibit file cleaning. See
456 : // DB.{disable,Enable}FileDeletions().
457 : disableFileDeletions int
458 :
459 : snapshots struct {
460 : // The list of active snapshots.
461 : snapshotList
462 :
463 : // The cumulative count and size of snapshot-pinned keys written to
464 : // sstables.
465 : cumulativePinnedCount uint64
466 : cumulativePinnedSize uint64
467 : }
468 :
469 : tableStats struct {
470 : // Condition variable used to signal the completion of a
471 : // job to collect table stats.
472 : cond sync.Cond
473 : // True when a stat collection operation is in progress.
474 : loading bool
475 : // True if stat collection has loaded statistics for all tables
476 : // other than those listed explicitly in pending. This flag starts
477 : // as false when a database is opened and flips to true once stat
478 : // collection has caught up.
479 : loadedInitial bool
480 : // A slice of files for which stats have not been computed.
481 : // Compactions, ingests, flushes append files to be processed. An
482 : // active stat collection goroutine clears the list and processes
483 : // them.
484 : pending []manifest.NewFileEntry
485 : }
486 :
487 : tableValidation struct {
488 : // cond is a condition variable used to signal the completion of a
489 : // job to validate one or more sstables.
490 : cond sync.Cond
491 : // pending is a slice of metadata for sstables waiting to be
492 : // validated. Only physical sstables should be added to the pending
493 : // queue.
494 : pending []newFileEntry
495 : // validating is set to true when validation is running.
496 : validating bool
497 : }
498 : }
499 :
500 : // Normally equal to time.Now() but may be overridden in tests.
501 : timeNow func() time.Time
502 : // The time at database Open; may be used to compute metrics like effective
503 : // compaction concurrency.
504 : openedAt time.Time
505 : }
506 :
507 : var _ Reader = (*DB)(nil)
508 : var _ Writer = (*DB)(nil)
509 :
510 : // TestOnlyWaitForCleaning MUST only be used in tests.
511 0 : func (d *DB) TestOnlyWaitForCleaning() {
512 0 : d.cleanupManager.Wait()
513 0 : }
514 :
515 : // Get gets the value for the given key. It returns ErrNotFound if the DB does
516 : // not contain the key.
517 : //
518 : // The caller should not modify the contents of the returned slice, but it is
519 : // safe to modify the contents of the argument after Get returns. The returned
520 : // slice will remain valid until the returned Closer is closed. On success, the
521 : // caller MUST call closer.Close() or a memory leak will occur.
522 1 : func (d *DB) Get(key []byte) ([]byte, io.Closer, error) {
523 1 : return d.getInternal(key, nil /* batch */, nil /* snapshot */)
524 1 : }
525 :
526 : type getIterAlloc struct {
527 : dbi Iterator
528 : keyBuf []byte
529 : get getIter
530 : }
531 :
532 : var getIterAllocPool = sync.Pool{
533 1 : New: func() interface{} {
534 1 : return &getIterAlloc{}
535 1 : },
536 : }
537 :
538 1 : func (d *DB) getInternal(key []byte, b *Batch, s *Snapshot) ([]byte, io.Closer, error) {
539 1 : if err := d.closed.Load(); err != nil {
540 0 : panic(err)
541 : }
542 :
543 : // Grab and reference the current readState. This prevents the underlying
544 : // files in the associated version from being deleted if there is a current
545 : // compaction. The readState is unref'd by Iterator.Close().
546 1 : readState := d.loadReadState()
547 1 :
548 1 : // Determine the seqnum to read at after grabbing the read state (current and
549 1 : // memtables) above.
550 1 : var seqNum uint64
551 1 : if s != nil {
552 1 : seqNum = s.seqNum
553 1 : } else {
554 1 : seqNum = d.mu.versions.visibleSeqNum.Load()
555 1 : }
556 :
557 1 : buf := getIterAllocPool.Get().(*getIterAlloc)
558 1 :
559 1 : get := &buf.get
560 1 : *get = getIter{
561 1 : logger: d.opts.Logger,
562 1 : cmp: d.cmp,
563 1 : equal: d.equal,
564 1 : split: d.split,
565 1 : newIters: d.newIters,
566 1 : snapshot: seqNum,
567 1 : key: key,
568 1 : batch: b,
569 1 : mem: readState.memtables,
570 1 : l0: readState.current.L0SublevelFiles,
571 1 : version: readState.current,
572 1 : }
573 1 :
574 1 : // Strip off memtables which cannot possibly contain the seqNum being read
575 1 : // at.
576 1 : for len(get.mem) > 0 {
577 1 : n := len(get.mem)
578 1 : if logSeqNum := get.mem[n-1].logSeqNum; logSeqNum < seqNum {
579 1 : break
580 : }
581 1 : get.mem = get.mem[:n-1]
582 : }
583 :
584 1 : i := &buf.dbi
585 1 : pointIter := get
586 1 : *i = Iterator{
587 1 : ctx: context.Background(),
588 1 : getIterAlloc: buf,
589 1 : iter: pointIter,
590 1 : pointIter: pointIter,
591 1 : merge: d.merge,
592 1 : comparer: *d.opts.Comparer,
593 1 : readState: readState,
594 1 : keyBuf: buf.keyBuf,
595 1 : }
596 1 :
597 1 : if !i.First() {
598 1 : err := i.Close()
599 1 : if err != nil {
600 0 : return nil, nil, err
601 0 : }
602 1 : return nil, nil, ErrNotFound
603 : }
604 1 : return i.Value(), i, nil
605 : }
606 :
607 : // Set sets the value for the given key. It overwrites any previous value
608 : // for that key; a DB is not a multi-map.
609 : //
610 : // It is safe to modify the contents of the arguments after Set returns.
611 1 : func (d *DB) Set(key, value []byte, opts *WriteOptions) error {
612 1 : b := newBatch(d)
613 1 : _ = b.Set(key, value, opts)
614 1 : if err := d.Apply(b, opts); err != nil {
615 0 : return err
616 0 : }
617 : // Only release the batch on success.
618 1 : b.release()
619 1 : return nil
620 : }
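// multiWriteExample is an illustrative sketch (not part of the original
// file): Set, Delete, Merge, and the other single-operation methods here
// each allocate a one-entry batch and Apply it. Batching related writes
// explicitly amortizes that overhead and commits them atomically. The
// helper name is hypothetical.
func multiWriteExample(d *DB, keys [][]byte, value []byte, opts *WriteOptions) error {
	b := d.NewBatch()
	defer b.Close()
	for _, k := range keys {
		if err := b.Set(k, value, opts); err != nil {
			return err
		}
	}
	return d.Apply(b, opts)
}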
621 :
622 : // Delete deletes the value for the given key. Deletes are blind and will
623 : // succeed even if the given key does not exist.
624 : //
625 : // It is safe to modify the contents of the arguments after Delete returns.
626 1 : func (d *DB) Delete(key []byte, opts *WriteOptions) error {
627 1 : b := newBatch(d)
628 1 : _ = b.Delete(key, opts)
629 1 : if err := d.Apply(b, opts); err != nil {
630 0 : return err
631 0 : }
632 : // Only release the batch on success.
633 1 : b.release()
634 1 : return nil
635 : }
636 :
637 : // DeleteSized behaves identically to Delete, but takes an additional
638 : // argument indicating the size of the value being deleted. DeleteSized
639 : // should be preferred when the caller has the expectation that there exists
640 : // a single internal KV pair for the key (eg, the key has not been
641 : // overwritten recently), and the caller knows the size of its value.
642 : //
643 : // DeleteSized will record the value size within the tombstone and use it to
644 : // inform compaction-picking heuristics which strive to reduce space
645 : // amplification in the LSM. This "calling your shot" mechanic allows the
646 : // storage engine to more accurately estimate and reduce space amplification.
647 : //
648 : // It is safe to modify the contents of the arguments after DeleteSized
649 : // returns.
650 1 : func (d *DB) DeleteSized(key []byte, valueSize uint32, opts *WriteOptions) error {
651 1 : b := newBatch(d)
652 1 : _ = b.DeleteSized(key, valueSize, opts)
653 1 : if err := d.Apply(b, opts); err != nil {
654 0 : return err
655 0 : }
656 : // Only release the batch on success.
657 1 : b.release()
658 1 : return nil
659 : }
660 :
661 : // SingleDelete adds an action to the batch that single deletes the entry for key.
662 : // See Writer.SingleDelete for more details on the semantics of SingleDelete.
663 : //
664 : // It is safe to modify the contents of the arguments after SingleDelete returns.
665 1 : func (d *DB) SingleDelete(key []byte, opts *WriteOptions) error {
666 1 : b := newBatch(d)
667 1 : _ = b.SingleDelete(key, opts)
668 1 : if err := d.Apply(b, opts); err != nil {
669 0 : return err
670 0 : }
671 : // Only release the batch on success.
672 1 : b.release()
673 1 : return nil
674 : }
675 :
676 : // DeleteRange deletes all of the keys (and values) in the range [start,end)
677 : // (inclusive on start, exclusive on end).
678 : //
679 : // It is safe to modify the contents of the arguments after DeleteRange
680 : // returns.
681 1 : func (d *DB) DeleteRange(start, end []byte, opts *WriteOptions) error {
682 1 : b := newBatch(d)
683 1 : _ = b.DeleteRange(start, end, opts)
684 1 : if err := d.Apply(b, opts); err != nil {
685 0 : return err
686 0 : }
687 : // Only release the batch on success.
688 1 : b.release()
689 1 : return nil
690 : }
691 :
692 : // Merge adds an action to the DB that merges the value at key with the new
693 : // value. The details of the merge are dependent upon the configured merge
694 : // operator.
695 : //
696 : // It is safe to modify the contents of the arguments after Merge returns.
697 1 : func (d *DB) Merge(key, value []byte, opts *WriteOptions) error {
698 1 : b := newBatch(d)
699 1 : _ = b.Merge(key, value, opts)
700 1 : if err := d.Apply(b, opts); err != nil {
701 0 : return err
702 0 : }
703 : // Only release the batch on success.
704 1 : b.release()
705 1 : return nil
706 : }
707 :
708 : // LogData adds the specified data to the batch. The data will be written to the
709 : // WAL, but not added to memtables or sstables. Log data is never indexed,
710 : // which makes it useful for testing WAL performance.
711 : //
712 : // It is safe to modify the contents of the argument after LogData returns.
713 0 : func (d *DB) LogData(data []byte, opts *WriteOptions) error {
714 0 : b := newBatch(d)
715 0 : _ = b.LogData(data, opts)
716 0 : if err := d.Apply(b, opts); err != nil {
717 0 : return err
718 0 : }
719 : // Only release the batch on success.
720 0 : b.release()
721 0 : return nil
722 : }
723 :
724 : // RangeKeySet sets a range key mapping the key range [start, end) at the MVCC
725 : // timestamp suffix to value. The suffix is optional. If any portion of the key
726 : // range [start, end) is already set by a range key with the same suffix value,
727 : // RangeKeySet overrides it.
728 : //
729 : // It is safe to modify the contents of the arguments after RangeKeySet returns.
730 1 : func (d *DB) RangeKeySet(start, end, suffix, value []byte, opts *WriteOptions) error {
731 1 : b := newBatch(d)
732 1 : _ = b.RangeKeySet(start, end, suffix, value, opts)
733 1 : if err := d.Apply(b, opts); err != nil {
734 0 : return err
735 0 : }
736 : // Only release the batch on success.
737 1 : b.release()
738 1 : return nil
739 : }
740 :
741 : // RangeKeyUnset removes a range key mapping the key range [start, end) at the
742 : // MVCC timestamp suffix. The suffix may be omitted to remove an unsuffixed
743 : // range key. RangeKeyUnset only removes portions of range keys that fall within
744 : // the [start, end) key span, and only range keys with suffixes that exactly
745 : // match the unset suffix.
746 : //
747 : // It is safe to modify the contents of the arguments after RangeKeyUnset
748 : // returns.
749 1 : func (d *DB) RangeKeyUnset(start, end, suffix []byte, opts *WriteOptions) error {
750 1 : b := newBatch(d)
751 1 : _ = b.RangeKeyUnset(start, end, suffix, opts)
752 1 : if err := d.Apply(b, opts); err != nil {
753 0 : return err
754 0 : }
755 : // Only release the batch on success.
756 1 : b.release()
757 1 : return nil
758 : }
759 :
760 : // RangeKeyDelete deletes all of the range keys in the range [start,end)
761 : // (inclusive on start, exclusive on end). It does not delete point keys (for
762 : // that use DeleteRange). RangeKeyDelete removes all range keys within the
763 : // bounds, including those with or without suffixes.
764 : //
765 : // It is safe to modify the contents of the arguments after RangeKeyDelete
766 : // returns.
767 1 : func (d *DB) RangeKeyDelete(start, end []byte, opts *WriteOptions) error {
768 1 : b := newBatch(d)
769 1 : _ = b.RangeKeyDelete(start, end, opts)
770 1 : if err := d.Apply(b, opts); err != nil {
771 0 : return err
772 0 : }
773 : // Only release the batch on success.
774 1 : b.release()
775 1 : return nil
776 : }
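// rangeKeyExample is an illustrative sketch (not part of the original
// file) of the range-key methods above. The "@5" suffix is a placeholder;
// suffix encoding is determined by the configured Comparer.
func rangeKeyExample(d *DB) error {
	start, end := []byte("a"), []byte("z")
	if err := d.RangeKeySet(start, end, []byte("@5"), []byte("v"), nil); err != nil {
		return err
	}
	// RangeKeyUnset removes only the portion with an exactly-matching suffix.
	if err := d.RangeKeyUnset(start, []byte("m"), []byte("@5"), nil); err != nil {
		return err
	}
	// RangeKeyDelete removes all range keys in [start, end), regardless of
	// suffix, while leaving point keys untouched.
	return d.RangeKeyDelete(start, end, nil)
}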
777 :
778 : // Apply the operations contained in the batch to the DB. If the batch is large
779 : // the contents of the batch may be retained by the database. If that occurs
780 : // the batch contents will be cleared preventing the caller from attempting to
781 : // reuse them.
782 : //
783 : // It is safe to modify the contents of the arguments after Apply returns.
784 1 : func (d *DB) Apply(batch *Batch, opts *WriteOptions) error {
785 1 : return d.applyInternal(batch, opts, false)
786 1 : }
787 :
788 : // ApplyNoSyncWait must only be used when opts.Sync is true and the caller
789 : // does not want to wait for the WAL fsync to happen. The method will return
790 : // once the mutation is applied to the memtable and is visible (note that a
791 : // mutation is visible before the WAL sync even in the wait case, so we have
792 : // not weakened the durability semantics). The caller must call Batch.SyncWait
793 : // to wait for the WAL fsync. The caller must not Close the batch without
794 : // first calling Batch.SyncWait.
795 : //
796 : // RECOMMENDATION: Prefer using Apply unless you really understand why you
797 : // need ApplyNoSyncWait.
798 : // EXPERIMENTAL: API/feature subject to change. Do not yet use outside
799 : // CockroachDB.
800 1 : func (d *DB) ApplyNoSyncWait(batch *Batch, opts *WriteOptions) error {
801 1 : if !opts.Sync {
802 0 : return errors.Errorf("cannot request asynchronous apply when WriteOptions.Sync is false")
803 0 : }
804 1 : return d.applyInternal(batch, opts, true)
805 : }
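// applyNoSyncWaitExample is an illustrative sketch (not part of the
// original file) of the two-step commit described above: the mutation is
// visible once ApplyNoSyncWait returns, and SyncWait later blocks on the
// WAL fsync. The batch must not be closed before SyncWait.
func applyNoSyncWaitExample(d *DB, b *Batch) error {
	if err := d.ApplyNoSyncWait(b, Sync); err != nil {
		return err
	}
	// ... overlap other work with the WAL sync here ...
	if err := b.SyncWait(); err != nil {
		return err
	}
	return b.Close()
}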
806 :
807 : // REQUIRES: noSyncWait => opts.Sync
808 1 : func (d *DB) applyInternal(batch *Batch, opts *WriteOptions, noSyncWait bool) error {
809 1 : if err := d.closed.Load(); err != nil {
810 0 : panic(err)
811 : }
812 1 : if batch.applied.Load() {
813 0 : panic("pebble: batch already applied")
814 : }
815 1 : if d.opts.ReadOnly {
816 0 : return ErrReadOnly
817 0 : }
818 1 : if batch.db != nil && batch.db != d {
819 0 : panic(fmt.Sprintf("pebble: batch db mismatch: %p != %p", batch.db, d))
820 : }
821 :
822 1 : sync := opts.GetSync()
823 1 : if sync && d.opts.DisableWAL {
824 0 : return errors.New("pebble: WAL disabled")
825 0 : }
826 :
827 1 : if batch.minimumFormatMajorVersion != FormatMostCompatible {
828 1 : if fmv := d.FormatMajorVersion(); fmv < batch.minimumFormatMajorVersion {
829 0 : panic(fmt.Sprintf(
830 0 : "pebble: batch requires at least format major version %d (current: %d)",
831 0 : batch.minimumFormatMajorVersion, fmv,
832 0 : ))
833 : }
834 : }
835 :
836 1 : if batch.countRangeKeys > 0 {
837 1 : if d.split == nil {
838 0 : return errNoSplit
839 0 : }
840 : // TODO(jackson): Assert that all range key operands are suffixless.
841 : }
842 :
843 1 : if batch.db == nil {
844 0 : batch.refreshMemTableSize()
845 0 : }
846 1 : if batch.memTableSize >= d.largeBatchThreshold {
847 1 : batch.flushable = newFlushableBatch(batch, d.opts.Comparer)
848 1 : }
849 1 : if err := d.commit.Commit(batch, sync, noSyncWait); err != nil {
850 0 : // There isn't much we can do on an error here. The commit pipeline will be
851 0 : // horked at this point.
852 0 : d.opts.Logger.Fatalf("pebble: fatal commit error: %v", err)
853 0 : }
854 : // If this is a large batch, we need to clear the batch contents as the
855 : // flushable batch may still be present in the flushables queue.
856 : //
857 : // TODO(peter): Currently large batches are written to the WAL. We could
858 : // skip the WAL write and instead wait for the large batch to be flushed to
859 : // an sstable. For a 100 MB batch, this might actually be faster. For a 1
860 : // GB batch this is almost certainly faster.
861 1 : if batch.flushable != nil {
862 1 : batch.data = nil
863 1 : }
864 1 : return nil
865 : }
866 :
867 1 : func (d *DB) commitApply(b *Batch, mem *memTable) error {
868 1 : if b.flushable != nil {
869 1 : // This is a large batch which was already added to the immutable queue.
870 1 : return nil
871 1 : }
872 1 : err := mem.apply(b, b.SeqNum())
873 1 : if err != nil {
874 0 : return err
875 0 : }
876 :
877 : // If the batch contains range tombstones and the database is configured
878 : // to flush range deletions, schedule a delayed flush so that disk space
879 : // may be reclaimed without additional writes or an explicit flush.
880 1 : if b.countRangeDels > 0 && d.opts.FlushDelayDeleteRange > 0 {
881 1 : d.mu.Lock()
882 1 : d.maybeScheduleDelayedFlush(mem, d.opts.FlushDelayDeleteRange)
883 1 : d.mu.Unlock()
884 1 : }
885 :
886 : // If the batch contains range keys and the database is configured to flush
887 : // range keys, schedule a delayed flush so that the range keys are cleared
888 : // from the memtable.
889 1 : if b.countRangeKeys > 0 && d.opts.FlushDelayRangeKey > 0 {
890 1 : d.mu.Lock()
891 1 : d.maybeScheduleDelayedFlush(mem, d.opts.FlushDelayRangeKey)
892 1 : d.mu.Unlock()
893 1 : }
894 :
895 1 : if mem.writerUnref() {
896 1 : d.mu.Lock()
897 1 : d.maybeScheduleFlush()
898 1 : d.mu.Unlock()
899 1 : }
900 1 : return nil
901 : }
902 :
903 1 : func (d *DB) commitWrite(b *Batch, syncWG *sync.WaitGroup, syncErr *error) (*memTable, error) {
904 1 : var size int64
905 1 : repr := b.Repr()
906 1 :
907 1 : if b.flushable != nil {
908 1 : // We have a large batch. Such batches are special in that they don't get
909 1 : // added to the memtable, and are instead inserted into the queue of
910 1 : // memtables. The call to makeRoomForWrite with this batch will force the
911 1 : // current memtable to be flushed. We want the large batch to be part of
912 1 : // the same log, so we add it to the WAL here, rather than after the call
913 1 : // to makeRoomForWrite().
914 1 : //
915 1 : // Set the sequence number since it was not set to the correct value earlier
916 1 : // (see comment in newFlushableBatch()).
917 1 : b.flushable.setSeqNum(b.SeqNum())
918 1 : if !d.opts.DisableWAL {
919 1 : var err error
920 1 : size, err = d.mu.log.SyncRecord(repr, syncWG, syncErr)
921 1 : if err != nil {
922 0 : panic(err)
923 : }
924 : }
925 : }
926 :
927 1 : d.mu.Lock()
928 1 :
929 1 : var err error
930 1 : if !b.ingestedSSTBatch {
931 1 : // Batches which contain keys of kind InternalKeyKindIngestSST will
932 1 : // never be applied to the memtable, so we don't need to make room for
933 1 : // write. For the other cases, switch out the memtable if there was not
934 1 : // enough room to store the batch.
935 1 : err = d.makeRoomForWrite(b)
936 1 : }
937 :
938 1 : if err == nil && !d.opts.DisableWAL {
939 1 : d.mu.log.bytesIn += uint64(len(repr))
940 1 : }
941 :
942 : // Grab a reference to the memtable while holding DB.mu. Note that for
943 : // non-flushable batches (b.flushable == nil) makeRoomForWrite() added a
944 : // reference to the memtable which will prevent it from being flushed until
945 : // we unreference it. This reference is dropped in DB.commitApply().
946 1 : mem := d.mu.mem.mutable
947 1 :
948 1 : d.mu.Unlock()
949 1 : if err != nil {
950 0 : return nil, err
951 0 : }
952 :
953 1 : if d.opts.DisableWAL {
954 1 : return mem, nil
955 1 : }
956 :
957 1 : if b.flushable == nil {
958 1 : size, err = d.mu.log.SyncRecord(repr, syncWG, syncErr)
959 1 : if err != nil {
960 0 : panic(err)
961 : }
962 : }
963 :
964 1 : d.logSize.Store(uint64(size))
965 1 : return mem, err
966 : }
967 :
968 : type iterAlloc struct {
969 : dbi Iterator
970 : keyBuf []byte
971 : boundsBuf [2][]byte
972 : prefixOrFullSeekKey []byte
973 : merging mergingIter
974 : mlevels [3 + numLevels]mergingIterLevel
975 : levels [3 + numLevels]levelIter
976 : levelsPositioned [3 + numLevels]bool
977 : }
978 :
979 : var iterAllocPool = sync.Pool{
980 1 : New: func() interface{} {
981 1 : return &iterAlloc{}
982 1 : },
983 : }
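// iterAllocCycleExample is an illustrative sketch (not part of the
// original file) of the pooling pattern above: newIter takes a bundled
// *iterAlloc from the pool and Iterator.Close returns it, so a single
// allocation serves many short-lived iterators.
func iterAllocCycleExample() {
	buf := iterAllocPool.Get().(*iterAlloc)
	// ... newIter wires buf.dbi, buf.merging, buf.mlevels, etc. together ...
	iterAllocPool.Put(buf) // performed by Iterator.Close in the real code
}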
984 :
985 : // snapshotIterOpts denotes snapshot-related iterator options when calling
986 : // newIter. These are the possible cases for a snapshotIterOpts:
987 : // - No snapshot: All fields are zero values.
988 : // - Classic snapshot: Only `seqNum` is set. The latest readState will be used
989 : // and the specified seqNum will be used as the snapshot seqNum.
990 : // - EventuallyFileOnlySnapshot (EFOS) in pre-file-only state: `readState` and
991 : // `seqNum` are set. The specified readState is used with the snapshot
992 : // sequence number.
993 : // - EFOS in file-only state: Only `seqNum` and `vers` are set. No readState
994 : // is referenced, as all the relevant SSTs are referenced by the *version.
995 : type snapshotIterOpts struct {
996 : seqNum uint64
997 : readState *readState
998 : vers *version
999 : }
1000 :
1001 : // newIter constructs a new iterator, merging in batch iterators as an extra
1002 : // level.
1003 : func (d *DB) newIter(
1004 : ctx context.Context, batch *Batch, sOpts snapshotIterOpts, o *IterOptions,
1005 1 : ) *Iterator {
1006 1 : if err := d.closed.Load(); err != nil {
1007 0 : panic(err)
1008 : }
1009 1 : seqNum := sOpts.seqNum
1010 1 : if o.rangeKeys() {
1011 1 : if d.FormatMajorVersion() < FormatRangeKeys {
1012 0 : panic(fmt.Sprintf(
1013 0 : "pebble: range keys require at least format major version %d (current: %d)",
1014 0 : FormatRangeKeys, d.FormatMajorVersion(),
1015 0 : ))
1016 : }
1017 : }
1018 1 : if o != nil && o.RangeKeyMasking.Suffix != nil && o.KeyTypes != IterKeyTypePointsAndRanges {
1019 0 : panic("pebble: range key masking requires IterKeyTypePointsAndRanges")
1020 : }
1021 1 : if (batch != nil || seqNum != 0) && (o != nil && o.OnlyReadGuaranteedDurable) {
1022 0 : // We could add support for OnlyReadGuaranteedDurable on snapshots if
1023 0 : // there was a need: this would require checking that the sequence number
1024 0 : // of the snapshot has been flushed, by comparing with
1025 0 : // DB.mem.queue[0].logSeqNum.
1026 0 : panic("OnlyReadGuaranteedDurable is not supported for batches or snapshots")
1027 : }
1028 : // Grab and reference the current readState. This prevents the underlying
1029 : // files in the associated version from being deleted if there is a current
1030 : // compaction. The readState is unref'd by Iterator.Close().
1031 1 : readState := sOpts.readState
1032 1 : if readState == nil && sOpts.vers == nil {
1033 1 : // NB: loadReadState() calls readState.ref().
1034 1 : readState = d.loadReadState()
1035 1 : } else if readState != nil {
1036 1 : readState.ref()
1037 1 : } else {
1038 1 : // sOpts.vers != nil
1039 1 : sOpts.vers.Ref()
1040 1 : }
1041 :
1042 : // Determine the seqnum to read at after grabbing the read state (current and
1043 : // memtables) above.
1044 1 : if seqNum == 0 {
1045 1 : seqNum = d.mu.versions.visibleSeqNum.Load()
1046 1 : }
1047 :
1048 : // Bundle various structures under a single umbrella in order to allocate
1049 : // them together.
1050 1 : buf := iterAllocPool.Get().(*iterAlloc)
1051 1 : dbi := &buf.dbi
1052 1 : *dbi = Iterator{
1053 1 : ctx: ctx,
1054 1 : alloc: buf,
1055 1 : merge: d.merge,
1056 1 : comparer: *d.opts.Comparer,
1057 1 : readState: readState,
1058 1 : version: sOpts.vers,
1059 1 : keyBuf: buf.keyBuf,
1060 1 : prefixOrFullSeekKey: buf.prefixOrFullSeekKey,
1061 1 : boundsBuf: buf.boundsBuf,
1062 1 : batch: batch,
1063 1 : newIters: d.newIters,
1064 1 : newIterRangeKey: d.tableNewRangeKeyIter,
1065 1 : seqNum: seqNum,
1066 1 : }
1067 1 : if o != nil {
1068 1 : dbi.opts = *o
1069 1 : dbi.processBounds(o.LowerBound, o.UpperBound)
1070 1 : }
1071 1 : dbi.opts.logger = d.opts.Logger
1072 1 : if d.opts.private.disableLazyCombinedIteration {
1073 1 : dbi.opts.disableLazyCombinedIteration = true
1074 1 : }
1075 1 : if batch != nil {
1076 1 : dbi.batchSeqNum = dbi.batch.nextSeqNum()
1077 1 : }
1078 1 : return finishInitializingIter(ctx, buf)
1079 : }
1080 :
1081 : // finishInitializingIter is a helper for doing the non-trivial initialization
1082 : // of an Iterator. It's invoked to perform the initial initialization of an
1083 : // Iterator during NewIter or Clone, and to perform reinitialization due to a
1084 : // change in IterOptions by a call to Iterator.SetOptions.
1085 1 : func finishInitializingIter(ctx context.Context, buf *iterAlloc) *Iterator {
1086 1 : // Short-hand.
1087 1 : dbi := &buf.dbi
1088 1 : var memtables flushableList
1089 1 : if dbi.readState != nil {
1090 1 : memtables = dbi.readState.memtables
1091 1 : }
1092 1 : if dbi.opts.OnlyReadGuaranteedDurable {
1093 0 : memtables = nil
1094 1 : } else {
1095 1 : // We only need to read from memtables which contain sequence numbers older
1096 1 : // than seqNum. Trim off newer memtables.
1097 1 : for i := len(memtables) - 1; i >= 0; i-- {
1098 1 : if logSeqNum := memtables[i].logSeqNum; logSeqNum < dbi.seqNum {
1099 1 : break
1100 : }
1101 1 : memtables = memtables[:i]
1102 : }
1103 : }
1104 :
1105 1 : if dbi.opts.pointKeys() {
1106 1 : // Construct the point iterator, initializing dbi.pointIter to point to
1107 1 : // dbi.merging. If this is called during a SetOptions call and this
1108 1 : // Iterator has already initialized dbi.merging, constructPointIter is a
1109 1 : // noop and an initialized pointIter already exists in dbi.pointIter.
1110 1 : dbi.constructPointIter(ctx, memtables, buf)
1111 1 : dbi.iter = dbi.pointIter
1112 1 : } else {
1113 1 : dbi.iter = emptyIter
1114 1 : }
1115 :
1116 1 : if dbi.opts.rangeKeys() {
1117 1 : dbi.rangeKeyMasking.init(dbi, dbi.comparer.Compare, dbi.comparer.Split)
1118 1 :
1119 1 : // When iterating over both point and range keys, don't create the
1120 1 : // range-key iterator stack immediately if we can avoid it. This
1121 1 : // optimization takes advantage of the expected sparseness of range
1122 1 : // keys, and configures the point-key iterator to dynamically switch to
1123 1 : // combined iteration when it observes a file containing range keys.
1124 1 : //
1125 1 : // Lazy combined iteration is not possible if a batch or a memtable
1126 1 : // contains any range keys.
1127 1 : useLazyCombinedIteration := dbi.rangeKey == nil &&
1128 1 : dbi.opts.KeyTypes == IterKeyTypePointsAndRanges &&
1129 1 : (dbi.batch == nil || dbi.batch.countRangeKeys == 0) &&
1130 1 : !dbi.opts.disableLazyCombinedIteration
1131 1 : if useLazyCombinedIteration {
1132 1 : // The user requested combined iteration, and there's no indexed
1133 1 : // batch currently containing range keys that would prevent lazy
1134 1 : // combined iteration. Check the memtables to see if they contain
1135 1 : // any range keys.
1136 1 : for i := range memtables {
1137 1 : if memtables[i].containsRangeKeys() {
1138 1 : useLazyCombinedIteration = false
1139 1 : break
1140 : }
1141 : }
1142 : }
1143 :
1144 1 : if useLazyCombinedIteration {
1145 1 : dbi.lazyCombinedIter = lazyCombinedIter{
1146 1 : parent: dbi,
1147 1 : pointIter: dbi.pointIter,
1148 1 : combinedIterState: combinedIterState{
1149 1 : initialized: false,
1150 1 : },
1151 1 : }
1152 1 : dbi.iter = &dbi.lazyCombinedIter
1153 1 : dbi.iter = invalidating.MaybeWrapIfInvariants(dbi.iter)
1154 1 : } else {
1155 1 : dbi.lazyCombinedIter.combinedIterState = combinedIterState{
1156 1 : initialized: true,
1157 1 : }
1158 1 : if dbi.rangeKey == nil {
1159 1 : dbi.rangeKey = iterRangeKeyStateAllocPool.Get().(*iteratorRangeKeyState)
1160 1 : dbi.rangeKey.init(dbi.comparer.Compare, dbi.comparer.Split, &dbi.opts)
1161 1 : dbi.constructRangeKeyIter()
1162 1 : } else {
1163 1 : dbi.rangeKey.iterConfig.SetBounds(dbi.opts.LowerBound, dbi.opts.UpperBound)
1164 1 : }
1165 :
1166 : // Wrap the point iterator (currently dbi.iter) with an interleaving
1167 : // iterator that interleaves range keys pulled from
1168 : // dbi.rangeKey.rangeKeyIter.
1169 : //
1170 : // NB: The interleaving iterator is always reinitialized, even if
1171 : // dbi already had an initialized range key iterator, in case the point
1172 : // iterator changed or the range key masking suffix changed.
1173 1 : dbi.rangeKey.iiter.Init(&dbi.comparer, dbi.iter, dbi.rangeKey.rangeKeyIter,
1174 1 : keyspan.InterleavingIterOpts{
1175 1 : Mask: &dbi.rangeKeyMasking,
1176 1 : LowerBound: dbi.opts.LowerBound,
1177 1 : UpperBound: dbi.opts.UpperBound,
1178 1 : })
1179 1 : dbi.iter = &dbi.rangeKey.iiter
1180 : }
1181 1 : } else {
1182 1 : // !dbi.opts.rangeKeys()
1183 1 : //
1184 1 : // Reset the combined iterator state. The initialized=true ensures the
1185 1 : // iterator doesn't unnecessarily try to switch to combined iteration.
1186 1 : dbi.lazyCombinedIter.combinedIterState = combinedIterState{initialized: true}
1187 1 : }
1188 1 : return dbi
1189 : }
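// combinedIterExample is an illustrative sketch (not part of the original
// file) of the iterator configuration that makes the lazy combined
// iteration path above eligible, assuming no batch or memtable contains
// range keys.
func combinedIterExample(d *DB) error {
	iter, err := d.NewIter(&IterOptions{KeyTypes: IterKeyTypePointsAndRanges})
	if err != nil {
		return err
	}
	// The point iterator runs alone until it observes a file containing
	// range keys, at which point the combined (interleaving) stack is built.
	for iter.First(); iter.Valid(); iter.Next() {
	}
	return iter.Close()
}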
1190 :
1191 : // ScanInternal scans all internal keys within the specified bounds, truncating
1192 : // any rangedels and rangekeys to those bounds if they span past them. For use
1193 : // when an external user needs to be aware of all internal keys that make up a
1194 : // key range.
1195 : //
1196 : // Keys deleted by range deletions must not be returned or exposed by this
1197 : // method, while the range deletion deleting that key must be exposed using
1198 : // visitRangeDel. Keys that would be masked by range key masking (if an
1199 : // appropriate prefix were set) should be exposed, alongside the range key
1200 : // that would have masked it. This method also collapses all point keys into
1201 : // one InternalKey; so only one internal key at most per user key is returned
1202 : // to visitPointKey.
1203 : //
1204 : // If visitSharedFile is not nil, ScanInternal iterates in skip-shared iteration
1205 : // mode. In this iteration mode, sstables in levels L5 and L6 are skipped, and
1206 : // their metadatas truncated to [lower, upper) and passed into visitSharedFile.
1207 : // ErrInvalidSkipSharedIteration is returned if visitSharedFile is not nil and an
1208 : // sstable in L5 or L6 is found that is not in shared storage according to
1209 : // provider.IsShared, or an sstable in those levels contains a newer key than the
1210 : // snapshot sequence number (only applicable for snapshot.ScanInternal). Examples
1211 : // of when this could happen could be if Pebble started writing sstables before a
1212 : // creator ID was set (as creator IDs are necessary to enable shared storage)
1213 : // resulting in some lower level SSTs being on non-shared storage. Skip-shared
1214 : // iteration is invalid in those cases.
1215 : func (d *DB) ScanInternal(
1216 : ctx context.Context,
1217 : lower, upper []byte,
1218 : visitPointKey func(key *InternalKey, value LazyValue, iterInfo IteratorLevel) error,
1219 : visitRangeDel func(start, end []byte, seqNum uint64) error,
1220 : visitRangeKey func(start, end []byte, keys []rangekey.Key) error,
1221 : visitSharedFile func(sst *SharedSSTMeta) error,
1222 0 : ) error {
1223 0 : scanInternalOpts := &scanInternalOptions{
1224 0 : visitPointKey: visitPointKey,
1225 0 : visitRangeDel: visitRangeDel,
1226 0 : visitRangeKey: visitRangeKey,
1227 0 : visitSharedFile: visitSharedFile,
1228 0 : skipSharedLevels: visitSharedFile != nil,
1229 0 : IterOptions: IterOptions{
1230 0 : KeyTypes: IterKeyTypePointsAndRanges,
1231 0 : LowerBound: lower,
1232 0 : UpperBound: upper,
1233 0 : },
1234 0 : }
1235 0 : iter := d.newInternalIter(snapshotIterOpts{} /* snapshot */, scanInternalOpts)
1236 0 : defer iter.close()
1237 0 : return scanInternalImpl(ctx, lower, upper, iter, scanInternalOpts)
1238 0 : }
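// scanInternalExample is an illustrative sketch (not part of the original
// file) of invoking ScanInternal with visitor callbacks. Passing a nil
// visitSharedFile leaves skip-shared iteration disabled.
func scanInternalExample(d *DB, lower, upper []byte) error {
	return d.ScanInternal(context.Background(), lower, upper,
		func(key *InternalKey, value LazyValue, _ IteratorLevel) error {
			return nil // one collapsed internal key per user key
		},
		func(start, end []byte, seqNum uint64) error {
			return nil // each range deletion, truncated to [lower, upper)
		},
		func(start, end []byte, keys []rangekey.Key) error {
			return nil // each range key, truncated to [lower, upper)
		},
		nil, // visitSharedFile
	)
}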
1239 :
1240 : // newInternalIter constructs and returns a new scanInternalIterator on this db.
1241 : // If o.skipSharedLevels is true, levels below sharedLevelsStart are *not* added
1242 : // to the internal iterator.
1243 : //
1244 : // TODO(bilal): This method has a lot of similarities with db.newIter as well as
1245 : // finishInitializingIter. Both pairs of methods should be refactored to reduce
1246 : // this duplication.
1247 0 : func (d *DB) newInternalIter(sOpts snapshotIterOpts, o *scanInternalOptions) *scanInternalIterator {
1248 0 : if err := d.closed.Load(); err != nil {
1249 0 : panic(err)
1250 : }
1251 : // Grab and reference the current readState. This prevents the underlying
1252 : // files in the associated version from being deleted if there is a current
1253 : // compaction. The readState is unref'd by Iterator.Close().
1254 0 : readState := sOpts.readState
1255 0 : if readState == nil && sOpts.vers == nil {
1256 0 : readState = d.loadReadState()
1257 0 : } else if readState != nil {
1258 0 : readState.ref()
1259 0 : }
1260 0 : if sOpts.vers != nil {
1261 0 : sOpts.vers.Ref()
1262 0 : }
1263 :
1264 : // Determine the seqnum to read at after grabbing the read state (current and
1265 : // memtables) above.
1266 0 : seqNum := sOpts.seqNum
1267 0 : if seqNum == 0 {
1268 0 : seqNum = d.mu.versions.visibleSeqNum.Load()
1269 0 : }
1270 :
1271 : // Bundle various structures under a single umbrella in order to allocate
1272 : // them together.
1273 0 : buf := iterAllocPool.Get().(*iterAlloc)
1274 0 : dbi := &scanInternalIterator{
1275 0 : db: d,
1276 0 : comparer: d.opts.Comparer,
1277 0 : merge: d.opts.Merger.Merge,
1278 0 : readState: readState,
1279 0 : version: sOpts.vers,
1280 0 : alloc: buf,
1281 0 : newIters: d.newIters,
1282 0 : newIterRangeKey: d.tableNewRangeKeyIter,
1283 0 : seqNum: seqNum,
1284 0 : mergingIter: &buf.merging,
1285 0 : }
1286 0 : if o != nil {
1287 0 : dbi.opts = *o
1288 0 : }
1289 0 : dbi.opts.logger = d.opts.Logger
1290 0 : if d.opts.private.disableLazyCombinedIteration {
1291 0 : dbi.opts.disableLazyCombinedIteration = true
1292 0 : }
1293 0 : return finishInitializingInternalIter(buf, dbi)
1294 : }
1295 :
1296 0 : func finishInitializingInternalIter(buf *iterAlloc, i *scanInternalIterator) *scanInternalIterator {
1297 0 : // Short-hand.
1298 0 : var memtables flushableList
1299 0 : if i.readState != nil {
1300 0 : memtables = i.readState.memtables
1301 0 : }
1302 : // We only need to read from memtables which contain sequence numbers older
1303 : // than seqNum. Trim off newer memtables.
1304 0 : for j := len(memtables) - 1; j >= 0; j-- {
1305 0 : if logSeqNum := memtables[j].logSeqNum; logSeqNum < i.seqNum {
1306 0 : break
1307 : }
1308 0 : memtables = memtables[:j]
1309 : }
1310 0 : i.initializeBoundBufs(i.opts.LowerBound, i.opts.UpperBound)
1311 0 :
1312 0 : i.constructPointIter(memtables, buf)
1313 0 :
1314 0 : // For internal iterators, we skip the lazy combined iteration optimization
1315 0 : // entirely, and create the range key iterator stack directly.
1316 0 : i.rangeKey = iterRangeKeyStateAllocPool.Get().(*iteratorRangeKeyState)
1317 0 : i.rangeKey.init(i.comparer.Compare, i.comparer.Split, &i.opts.IterOptions)
1318 0 : i.constructRangeKeyIter()
1319 0 :
1320 0 : // Wrap the point iterator (currently i.iter) with an interleaving
1321 0 : // iterator that interleaves range keys pulled from
1322 0 : // i.rangeKey.rangeKeyIter.
1323 0 : i.rangeKey.iiter.Init(i.comparer, i.iter, i.rangeKey.rangeKeyIter,
1324 0 : keyspan.InterleavingIterOpts{
1325 0 : LowerBound: i.opts.LowerBound,
1326 0 : UpperBound: i.opts.UpperBound,
1327 0 : })
1328 0 : i.iter = &i.rangeKey.iiter
1329 0 :
1330 0 : return i
1331 : }
1332 :
1333 : func (i *Iterator) constructPointIter(
1334 : ctx context.Context, memtables flushableList, buf *iterAlloc,
1335 1 : ) {
1336 1 : if i.pointIter != nil {
1337 1 : // Already have one.
1338 1 : return
1339 1 : }
1340 1 : internalOpts := internalIterOpts{stats: &i.stats.InternalStats}
1341 1 : if i.opts.RangeKeyMasking.Filter != nil {
1342 1 : internalOpts.boundLimitedFilter = &i.rangeKeyMasking
1343 1 : }
1344 :
1345 : // Merging levels and levels from iterAlloc.
1346 1 : mlevels := buf.mlevels[:0]
1347 1 : levels := buf.levels[:0]
1348 1 :
1349 1 : // We compute the number of levels needed ahead of time and reallocate a slice if
1350 1 : // the array from the iterAlloc isn't large enough. Doing this allocation once
1351 1 : // should improve the performance.
1352 1 : numMergingLevels := 0
1353 1 : numLevelIters := 0
1354 1 : if i.batch != nil {
1355 1 : numMergingLevels++
1356 1 : }
1357 1 : numMergingLevels += len(memtables)
1358 1 :
1359 1 : current := i.version
1360 1 : if current == nil {
1361 1 : current = i.readState.current
1362 1 : }
1363 1 : numMergingLevels += len(current.L0SublevelFiles)
1364 1 : numLevelIters += len(current.L0SublevelFiles)
1365 1 : for level := 1; level < len(current.Levels); level++ {
1366 1 : if current.Levels[level].Empty() {
1367 1 : continue
1368 : }
1369 1 : numMergingLevels++
1370 1 : numLevelIters++
1371 : }
1372 :
1373 1 : if numMergingLevels > cap(mlevels) {
1374 1 : mlevels = make([]mergingIterLevel, 0, numMergingLevels)
1375 1 : }
1376 1 : if numLevelIters > cap(levels) {
1377 1 : levels = make([]levelIter, 0, numLevelIters)
1378 1 : }
1379 :
1380 : // Top-level is the batch, if any.
1381 1 : if i.batch != nil {
1382 1 : if i.batch.index == nil {
1383 0 : // This isn't an indexed batch. Include an error iterator so that
1384 0 : // the resulting iterator correctly surfaces ErrIndexed.
1385 0 : mlevels = append(mlevels, mergingIterLevel{
1386 0 : iter: newErrorIter(ErrNotIndexed),
1387 0 : rangeDelIter: newErrorKeyspanIter(ErrNotIndexed),
1388 0 : })
1389 1 : } else {
1390 1 : i.batch.initInternalIter(&i.opts, &i.batchPointIter)
1391 1 : i.batch.initRangeDelIter(&i.opts, &i.batchRangeDelIter, i.batchSeqNum)
1392 1 : // Only include the batch's rangedel iterator if it's non-empty.
1393 1 : // This requires some subtle logic in the case a rangedel is later
1394 1 : // written to the batch and the view of the batch is refreshed
1395 1 : // during a call to SetOptions—in this case, we need to reconstruct
1396 1 : // the point iterator to add the batch rangedel iterator.
1397 1 : var rangeDelIter keyspan.FragmentIterator
1398 1 : if i.batchRangeDelIter.Count() > 0 {
1399 1 : rangeDelIter = &i.batchRangeDelIter
1400 1 : }
1401 1 : mlevels = append(mlevels, mergingIterLevel{
1402 1 : iter: &i.batchPointIter,
1403 1 : rangeDelIter: rangeDelIter,
1404 1 : })
1405 : }
1406 : }
1407 :
1408 : // Next are the memtables.
1409 1 : for j := len(memtables) - 1; j >= 0; j-- {
1410 1 : mem := memtables[j]
1411 1 : mlevels = append(mlevels, mergingIterLevel{
1412 1 : iter: mem.newIter(&i.opts),
1413 1 : rangeDelIter: mem.newRangeDelIter(&i.opts),
1414 1 : })
1415 1 : }
1416 :
1417 : // Next are the file levels: L0 sub-levels followed by lower levels.
1418 1 : mlevelsIndex := len(mlevels)
1419 1 : levelsIndex := len(levels)
1420 1 : mlevels = mlevels[:numMergingLevels]
1421 1 : levels = levels[:numLevelIters]
1422 1 : i.opts.snapshotForHideObsoletePoints = buf.dbi.seqNum
1423 1 : addLevelIterForFiles := func(files manifest.LevelIterator, level manifest.Level) {
1424 1 : li := &levels[levelsIndex]
1425 1 :
1426 1 : li.init(
1427 1 : ctx, i.opts, i.comparer.Compare, i.comparer.Split, i.newIters, files, level, internalOpts)
1428 1 : li.initRangeDel(&mlevels[mlevelsIndex].rangeDelIter)
1429 1 : li.initBoundaryContext(&mlevels[mlevelsIndex].levelIterBoundaryContext)
1430 1 : li.initCombinedIterState(&i.lazyCombinedIter.combinedIterState)
1431 1 : mlevels[mlevelsIndex].levelIter = li
1432 1 : mlevels[mlevelsIndex].iter = invalidating.MaybeWrapIfInvariants(li)
1433 1 :
1434 1 : levelsIndex++
1435 1 : mlevelsIndex++
1436 1 : }
1437 :
1438 : // Add level iterators for the L0 sublevels, iterating from newest to
1439 : // oldest.
1440 1 : for i := len(current.L0SublevelFiles) - 1; i >= 0; i-- {
1441 1 : addLevelIterForFiles(current.L0SublevelFiles[i].Iter(), manifest.L0Sublevel(i))
1442 1 : }
1443 :
1444 : // Add level iterators for the non-empty non-L0 levels.
1445 1 : for level := 1; level < len(current.Levels); level++ {
1446 1 : if current.Levels[level].Empty() {
1447 1 : continue
1448 : }
1449 1 : addLevelIterForFiles(current.Levels[level].Iter(), manifest.Level(level))
1450 : }
1451 1 : buf.merging.init(&i.opts, &i.stats.InternalStats, i.comparer.Compare, i.comparer.Split, mlevels...)
1452 1 : if len(mlevels) <= cap(buf.levelsPositioned) {
1453 1 : buf.merging.levelsPositioned = buf.levelsPositioned[:len(mlevels)]
1454 1 : }
1455 1 : buf.merging.snapshot = i.seqNum
1456 1 : buf.merging.batchSnapshot = i.batchSeqNum
1457 1 : buf.merging.combinedIterState = &i.lazyCombinedIter.combinedIterState
1458 1 : i.pointIter = invalidating.MaybeWrapIfInvariants(&buf.merging)
1459 1 : i.merging = &buf.merging
1460 : }
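// Recap of the merge stack assembled above (an annotation on this listing,
// not part of the source). The merging levels are ordered from newest data
// to oldest:
//
//	indexed batch, if the iterator reads through one
//	memtables, newest first
//	L0 sublevels, highest (newest) sublevel first
//	L1 through L6, skipping empty levels
//
// For identical user keys, entries from newer sources take precedence.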
1461 :
1462 : // NewBatch returns a new empty write-only batch. Any reads on the batch will
1463 : // return an error. If the batch is committed it will be applied to the DB.
1464 1 : func (d *DB) NewBatch() *Batch {
1465 1 : return newBatch(d)
1466 1 : }
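// A minimal usage sketch (illustrative annotation, not part of the source;
// assumes an open *DB named d and the client-side import
// "github.com/cockroachdb/pebble"):
//
//	b := d.NewBatch()
//	defer b.Close()
//	if err := b.Set([]byte("k1"), []byte("v1"), nil); err != nil {
//		return err
//	}
//	if err := b.Delete([]byte("k2"), nil); err != nil {
//		return err
//	}
//	return b.Commit(pebble.Sync) // durably applies the batch to the DB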
1467 :
1468 : // NewBatchWithSize is mostly identical to NewBatch, but it will allocate
1469 : // the specified memory space for the internal slice in advance.
1470 0 : func (d *DB) NewBatchWithSize(size int) *Batch {
1471 0 : return newBatchWithSize(d, size)
1472 0 : }
1473 :
1474 : // NewIndexedBatch returns a new empty read-write batch. Any reads on the batch
1475 : // will read from both the batch and the DB. If the batch is committed it will
1476 : // be applied to the DB. An indexed batch is slower than a non-indexed batch
1477 : // for insert operations. If you do not need to perform reads on the batch, use
1478 : // NewBatch instead.
1479 1 : func (d *DB) NewIndexedBatch() *Batch {
1480 1 : return newIndexedBatch(d, d.opts.Comparer)
1481 1 : }
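// A read-your-writes sketch (illustrative; assumes an open *DB named d and
// import "github.com/cockroachdb/pebble"):
//
//	b := d.NewIndexedBatch()
//	defer b.Close()
//	if err := b.Set([]byte("k"), []byte("v"), nil); err != nil {
//		return err
//	}
//	v, closer, err := b.Get([]byte("k")) // observes the uncommitted Set above
//	if err != nil {
//		return err
//	}
//	_ = v // use the value before closing
//	if err := closer.Close(); err != nil {
//		return err
//	}
//	return b.Commit(pebble.Sync)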
1482 :
1483 : // NewIndexedBatchWithSize is mostly identical to NewIndexedBatch, but it will
1484 : // allocate the specified memory space for the internal slice in advance.
1485 0 : func (d *DB) NewIndexedBatchWithSize(size int) *Batch {
1486 0 : return newIndexedBatchWithSize(d, d.opts.Comparer, size)
1487 0 : }
1488 :
1489 : // NewIter returns an iterator that is unpositioned (Iterator.Valid() will
1490 : // return false). The iterator can be positioned via a call to SeekGE, SeekLT,
1491 : // First or Last. The iterator provides a point-in-time view of the current DB
1492 : // state. This view is maintained by preventing file deletions and preventing
1493 : // memtables referenced by the iterator from being deleted. Using an iterator
1494 : // to maintain a long-lived point-in-time view of the DB state can lead to an
1495 : // apparent memory and disk usage leak. Use snapshots (see NewSnapshot) for
1496 : // point-in-time snapshots, which avoid these problems.
1497 1 : func (d *DB) NewIter(o *IterOptions) (*Iterator, error) {
1498 1 : return d.NewIterWithContext(context.Background(), o)
1499 1 : }
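// A typical scan sketch (illustrative; assumes an open *DB named d and
// imports "fmt" and "github.com/cockroachdb/pebble"):
//
//	iter, err := d.NewIter(&pebble.IterOptions{
//		LowerBound: []byte("a"), // inclusive
//		UpperBound: []byte("z"), // exclusive
//	})
//	if err != nil {
//		return err
//	}
//	defer iter.Close() // required: the DB cannot close until all iterators do
//	for iter.First(); iter.Valid(); iter.Next() {
//		fmt.Printf("%s => %s\n", iter.Key(), iter.Value())
//	}
//	return iter.Error()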
1500 :
1501 : // NewIterWithContext is like NewIter, and additionally accepts a context for
1502 : // tracing.
1503 1 : func (d *DB) NewIterWithContext(ctx context.Context, o *IterOptions) (*Iterator, error) {
1504 1 : return d.newIter(ctx, nil /* batch */, snapshotIterOpts{}, o), nil
1505 1 : }
1506 :
1507 : // NewSnapshot returns a point-in-time view of the current DB state. Iterators
1508 : // created with this handle will all observe a stable snapshot of the current
1509 : // DB state. The caller must call Snapshot.Close() when the snapshot is no
1510 : // longer needed. Snapshots are not persisted across DB restarts (close ->
1511 : // open). Unlike the implicit snapshot maintained by an iterator, a snapshot
1512 : // will not prevent memtables from being released or sstables from being
1513 : // deleted. Instead, a snapshot prevents deletion of sequence numbers
1514 : // referenced by the snapshot.
1515 1 : func (d *DB) NewSnapshot() *Snapshot {
1516 1 : if err := d.closed.Load(); err != nil {
1517 0 : panic(err)
1518 : }
1519 :
1520 1 : d.mu.Lock()
1521 1 : s := &Snapshot{
1522 1 : db: d,
1523 1 : seqNum: d.mu.versions.visibleSeqNum.Load(),
1524 1 : }
1525 1 : d.mu.snapshots.pushBack(s)
1526 1 : d.mu.Unlock()
1527 1 : return s
1528 : }
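// A usage sketch (illustrative; assumes an open *DB named d):
//
//	s := d.NewSnapshot()
//	defer s.Close() // leaked snapshots are reported as an error by DB.Close
//	// Reads through s observe the DB as of NewSnapshot, ignoring later writes.
//	v, closer, err := s.Get([]byte("k"))
//	if err != nil {
//		return err
//	}
//	_ = v
//	return closer.Close()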
1529 :
1530 : // NewEventuallyFileOnlySnapshot returns a point-in-time view of the current DB
1531 : // state, similar to NewSnapshot, but with consistency constrained to the
1532 : // provided set of key ranges. See the comment at EventuallyFileOnlySnapshot for
1533 : // its semantics.
1534 1 : func (d *DB) NewEventuallyFileOnlySnapshot(keyRanges []KeyRange) *EventuallyFileOnlySnapshot {
1535 1 : if err := d.closed.Load(); err != nil {
1536 0 : panic(err)
1537 : }
1538 :
1539 1 : internalKeyRanges := make([]internalKeyRange, len(keyRanges))
1540 1 : for i := range keyRanges {
1541 1 : if i > 0 && d.cmp(keyRanges[i-1].End, keyRanges[i].Start) > 0 {
1542 0 : panic("pebble: key ranges for eventually-file-only-snapshot not in order")
1543 : }
1544 1 : internalKeyRanges[i] = internalKeyRange{
1545 1 : smallest: base.MakeInternalKey(keyRanges[i].Start, InternalKeySeqNumMax, InternalKeyKindMax),
1546 1 : largest: base.MakeExclusiveSentinelKey(InternalKeyKindRangeDelete, keyRanges[i].End),
1547 1 : }
1548 : }
1549 :
1550 1 : return d.makeEventuallyFileOnlySnapshot(keyRanges, internalKeyRanges)
1551 : }
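// A construction sketch (illustrative). Note the ordering requirement
// enforced by the panic above: the key ranges must be sorted and
// non-overlapping (touching ranges are allowed):
//
//	efos := d.NewEventuallyFileOnlySnapshot([]pebble.KeyRange{
//		{Start: []byte("a"), End: []byte("m")},
//		{Start: []byte("m"), End: []byte("z")},
//	})
//	defer efos.Close()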
1552 :
1553 : // Close closes the DB.
1554 : //
1555 : // It is not safe to close a DB until all outstanding iterators are closed
1556 : // or to call Close concurrently with any other DB method. It is not valid
1557 : // to call any of a DB's methods after the DB has been closed.
1558 1 : func (d *DB) Close() error {
1559 1 : // Lock the commit pipeline for the duration of Close. This prevents a race
1560 1 : // with makeRoomForWrite. Rotating the WAL in makeRoomForWrite requires
1561 1 : // dropping d.mu several times for I/O. If Close only holds d.mu, an
1562 1 : // in-progress WAL rotation may re-acquire d.mu only once the database is
1563 1 : // closed.
1564 1 : //
1565 1 : // Additionally, locking the commit pipeline makes it more likely that
1566 1 : // (illegal) concurrent writes will observe d.closed.Load() != nil, creating
1567 1 : // more understable panics if the database is improperly used concurrently
1568 1 : // during Close.
1569 1 : d.commit.mu.Lock()
1570 1 : defer d.commit.mu.Unlock()
1571 1 : d.mu.Lock()
1572 1 : defer d.mu.Unlock()
1573 1 : if err := d.closed.Load(); err != nil {
1574 0 : panic(err)
1575 : }
1576 :
1577 : // Clear the finalizer that is used to check that an unreferenced DB has been
1578 : // closed. We're closing the DB here, so the check performed by that
1579 : // finalizer isn't necessary.
1580 : //
1581 : // Note: this is a no-op if invariants are disabled or race is enabled.
1582 1 : invariants.SetFinalizer(d.closed, nil)
1583 1 :
1584 1 : d.closed.Store(errors.WithStack(ErrClosed))
1585 1 : close(d.closedCh)
1586 1 :
1587 1 : defer d.opts.Cache.Unref()
1588 1 :
1589 1 : for d.mu.compact.compactingCount > 0 || d.mu.compact.flushing {
1590 1 : d.mu.compact.cond.Wait()
1591 1 : }
1592 1 : for d.mu.tableStats.loading {
1593 1 : d.mu.tableStats.cond.Wait()
1594 1 : }
1595 1 : for d.mu.tableValidation.validating {
1596 1 : d.mu.tableValidation.cond.Wait()
1597 1 : }
1598 :
1599 1 : var err error
1600 1 : if n := len(d.mu.compact.inProgress); n > 0 {
1601 0 : err = errors.Errorf("pebble: %d unexpected in-progress compactions", errors.Safe(n))
1602 0 : }
1603 1 : err = firstError(err, d.mu.formatVers.marker.Close())
1604 1 : err = firstError(err, d.tableCache.close())
1605 1 : if !d.opts.ReadOnly {
1606 1 : err = firstError(err, d.mu.log.Close())
1607 1 : } else if d.mu.log.LogWriter != nil {
1608 0 : panic("pebble: log-writer should be nil in read-only mode")
1609 : }
1610 1 : err = firstError(err, d.fileLock.Close())
1611 1 :
1612 1 : // Note that versionSet.close() only closes the MANIFEST. The versions list
1613 1 : // is still valid for the checks below.
1614 1 : err = firstError(err, d.mu.versions.close())
1615 1 :
1616 1 : err = firstError(err, d.dataDir.Close())
1617 1 : if d.dataDir != d.walDir {
1618 1 : err = firstError(err, d.walDir.Close())
1619 1 : }
1620 :
1621 1 : d.readState.val.unrefLocked()
1622 1 :
1623 1 : current := d.mu.versions.currentVersion()
1624 1 : for v := d.mu.versions.versions.Front(); true; v = v.Next() {
1625 1 : refs := v.Refs()
1626 1 : if v == current {
1627 1 : if refs != 1 {
1628 0 : err = firstError(err, errors.Errorf("leaked iterators: current\n%s", v))
1629 0 : }
1630 1 : break
1631 : }
1632 0 : if refs != 0 {
1633 0 : err = firstError(err, errors.Errorf("leaked iterators:\n%s", v))
1634 0 : }
1635 : }
1636 :
1637 1 : for _, mem := range d.mu.mem.queue {
1638 1 : // Usually, we'd want to delete the files returned by readerUnref. But
1639 1 : // in this case, even if we're unreferencing the flushables, the
1640 1 : // flushables aren't obsolete. They will be reconstructed during WAL
1641 1 : // replay.
1642 1 : mem.readerUnrefLocked(false)
1643 1 : }
1644 : // If there's an unused, recycled memtable, we need to release its memory.
1645 1 : if obsoleteMemTable := d.memTableRecycle.Swap(nil); obsoleteMemTable != nil {
1646 1 : d.freeMemTable(obsoleteMemTable)
1647 1 : }
1648 1 : if reserved := d.memTableReserved.Load(); reserved != 0 {
1649 0 : err = firstError(err, errors.Errorf("leaked memtable reservation: %d", errors.Safe(reserved)))
1650 0 : }
1651 :
1652 : // Since we called d.readState.val.unrefLocked() above, we are expected to
1653 : // manually schedule deletion of obsolete files.
1654 1 : if len(d.mu.versions.obsoleteTables) > 0 {
1655 1 : d.deleteObsoleteFiles(d.mu.nextJobID)
1656 1 : }
1657 :
1658 1 : d.mu.Unlock()
1659 1 : d.compactionSchedulers.Wait()
1660 1 :
1661 1 : // Wait for all cleaning jobs to finish.
1662 1 : d.cleanupManager.Close()
1663 1 :
1664 1 : // Sanity check metrics.
1665 1 : if invariants.Enabled {
1666 1 : m := d.Metrics()
1667 1 : if m.Compact.NumInProgress > 0 || m.Compact.InProgressBytes > 0 {
1668 0 : d.mu.Lock()
1669 0 : panic(fmt.Sprintf("invalid metrics on close:\n%s", m))
1670 : }
1671 : }
1672 :
1673 1 : d.mu.Lock()
1674 1 :
1675 1 : // As a sanity check, ensure that there are no zombie tables. A non-zero count
1676 1 : // hints at a reference count leak.
1677 1 : if ztbls := len(d.mu.versions.zombieTables); ztbls > 0 {
1678 0 : err = firstError(err, errors.Errorf("non-zero zombie file count: %d", ztbls))
1679 0 : }
1680 :
1681 1 : err = firstError(err, d.objProvider.Close())
1682 1 :
1683 1 : // If the options include a closer to 'close' the filesystem, close it.
1684 1 : if d.opts.private.fsCloser != nil {
1685 1 : d.opts.private.fsCloser.Close()
1686 1 : }
1687 :
1688 : // Return an error if the user failed to close all open snapshots.
1689 1 : if v := d.mu.snapshots.count(); v > 0 {
1690 0 : err = firstError(err, errors.Errorf("leaked snapshots: %d open snapshots on DB %p", v, d))
1691 0 : }
1692 :
1693 1 : return err
1694 : }
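// A lifecycle sketch (illustrative; assumes import "github.com/cockroachdb/pebble"):
//
//	db, err := pebble.Open("demo-dir", &pebble.Options{})
//	if err != nil {
//		return err
//	}
//	// ... use the DB; close every iterator, snapshot, and batch first ...
//	if err := db.Close(); err != nil {
//		return err // e.g. reports leaked iterators or open snapshots
//	}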
1695 :
1696 : // Compact the specified range of keys in the database.
1697 1 : func (d *DB) Compact(start, end []byte, parallelize bool) error {
1698 1 : if err := d.closed.Load(); err != nil {
1699 0 : panic(err)
1700 : }
1701 1 : if d.opts.ReadOnly {
1702 0 : return ErrReadOnly
1703 0 : }
1704 1 : if d.cmp(start, end) >= 0 {
1705 1 : return errors.Errorf("Compact start %s is not less than end %s",
1706 1 : d.opts.Comparer.FormatKey(start), d.opts.Comparer.FormatKey(end))
1707 1 : }
1708 1 : iStart := base.MakeInternalKey(start, InternalKeySeqNumMax, InternalKeyKindMax)
1709 1 : iEnd := base.MakeInternalKey(end, 0, 0)
1710 1 : m := (&fileMetadata{}).ExtendPointKeyBounds(d.cmp, iStart, iEnd)
1711 1 : meta := []*fileMetadata{m}
1712 1 :
1713 1 : d.mu.Lock()
1714 1 : maxLevelWithFiles := 1
1715 1 : cur := d.mu.versions.currentVersion()
1716 1 : for level := 0; level < numLevels; level++ {
1717 1 : overlaps := cur.Overlaps(level, d.cmp, start, end, iEnd.IsExclusiveSentinel())
1718 1 : if !overlaps.Empty() {
1719 1 : maxLevelWithFiles = level + 1
1720 1 : }
1721 : }
1722 :
1723 1 : keyRanges := make([]internalKeyRange, len(meta))
1724 1 : for i := range meta {
1725 1 : keyRanges[i] = internalKeyRange{smallest: m.Smallest, largest: m.Largest}
1726 1 : }
1727 : // Determine if any memtable overlaps with the compaction range. We wait for
1728 : // any such overlap to flush (initiating a flush if necessary).
1729 1 : mem, err := func() (*flushableEntry, error) {
1730 1 : // Check to see if any files overlap with any of the memtables. The queue
1731 1 : // is ordered from oldest to newest with the mutable memtable being the
1732 1 : // last element in the slice. We want to wait for the newest table that
1733 1 : // overlaps.
1734 1 : for i := len(d.mu.mem.queue) - 1; i >= 0; i-- {
1735 1 : mem := d.mu.mem.queue[i]
1736 1 : if ingestMemtableOverlaps(d.cmp, mem, keyRanges) {
1737 1 : var err error
1738 1 : if mem.flushable == d.mu.mem.mutable {
1739 1 : // We have to hold both commitPipeline.mu and DB.mu when calling
1740 1 : // makeRoomForWrite(). Lock order requirements elsewhere force us to
1741 1 : // unlock DB.mu in order to grab commitPipeline.mu first.
1742 1 : d.mu.Unlock()
1743 1 : d.commit.mu.Lock()
1744 1 : d.mu.Lock()
1745 1 : defer d.commit.mu.Unlock()
1746 1 : if mem.flushable == d.mu.mem.mutable {
1747 1 : // Only flush if the active memtable is unchanged.
1748 1 : err = d.makeRoomForWrite(nil)
1749 1 : }
1750 : }
1751 1 : mem.flushForced = true
1752 1 : d.maybeScheduleFlush()
1753 1 : return mem, err
1754 : }
1755 : }
1756 1 : return nil, nil
1757 : }()
1758 :
1759 1 : d.mu.Unlock()
1760 1 :
1761 1 : if err != nil {
1762 0 : return err
1763 0 : }
1764 1 : if mem != nil {
1765 1 : <-mem.flushed
1766 1 : }
1767 :
1768 1 : for level := 0; level < maxLevelWithFiles; {
1769 1 : if err := d.manualCompact(
1770 1 : iStart.UserKey, iEnd.UserKey, level, parallelize); err != nil {
1771 0 : return err
1772 0 : }
1773 1 : level++
1774 1 : if level == numLevels-1 {
1775 1 : // A manual compaction of the bottommost level occurred.
1776 1 : // There is no next level to try and compact.
1777 1 : break
1778 : }
1779 : }
1780 1 : return nil
1781 : }
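// A usage sketch (illustrative): compact all data overlapping a key range
// down the LSM, letting Pebble split the work into parallel, non-overlapping
// compactions:
//
//	if err := d.Compact([]byte("a"), []byte("z"), true /* parallelize */); err != nil {
//		return err
//	}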
1782 :
1783 1 : func (d *DB) manualCompact(start, end []byte, level int, parallelize bool) error {
1784 1 : d.mu.Lock()
1785 1 : curr := d.mu.versions.currentVersion()
1786 1 : files := curr.Overlaps(level, d.cmp, start, end, false)
1787 1 : if files.Empty() {
1788 1 : d.mu.Unlock()
1789 1 : return nil
1790 1 : }
1791 :
1792 1 : var compactions []*manualCompaction
1793 1 : if parallelize {
1794 1 : compactions = append(compactions, d.splitManualCompaction(start, end, level)...)
1795 1 : } else {
1796 1 : compactions = append(compactions, &manualCompaction{
1797 1 : level: level,
1798 1 : done: make(chan error, 1),
1799 1 : start: start,
1800 1 : end: end,
1801 1 : })
1802 1 : }
1803 1 : d.mu.compact.manual = append(d.mu.compact.manual, compactions...)
1804 1 : d.maybeScheduleCompaction()
1805 1 : d.mu.Unlock()
1806 1 :
1807 1 : // Each of the channels is guaranteed to eventually receive exactly one value. After a
1808 1 : // compaction is possibly picked in d.maybeScheduleCompaction(), either the
1809 1 : // compaction is dropped, executed after being scheduled, or retried later.
1810 1 : // Assuming eventual progress when a compaction is retried, all outcomes send
1811 1 : // a value to the done channel. Since the channels are buffered, it is not
1812 1 : // necessary to read from each channel, and so we can exit early in the event
1813 1 : // of an error.
1814 1 : for _, compaction := range compactions {
1815 1 : if err := <-compaction.done; err != nil {
1816 0 : return err
1817 0 : }
1818 : }
1819 1 : return nil
1820 : }
1821 :
1822 : // splitManualCompaction splits a manual compaction over [start,end] on level
1823 : // such that the resulting compactions have no key overlap.
1824 : func (d *DB) splitManualCompaction(
1825 : start, end []byte, level int,
1826 1 : ) (splitCompactions []*manualCompaction) {
1827 1 : curr := d.mu.versions.currentVersion()
1828 1 : endLevel := level + 1
1829 1 : baseLevel := d.mu.versions.picker.getBaseLevel()
1830 1 : if level == 0 {
1831 1 : endLevel = baseLevel
1832 1 : }
1833 1 : keyRanges := calculateInuseKeyRanges(curr, d.cmp, level, endLevel, start, end)
1834 1 : for _, keyRange := range keyRanges {
1835 1 : splitCompactions = append(splitCompactions, &manualCompaction{
1836 1 : level: level,
1837 1 : done: make(chan error, 1),
1838 1 : start: keyRange.Start,
1839 1 : end: keyRange.End,
1840 1 : split: true,
1841 1 : })
1842 1 : }
1843 1 : return splitCompactions
1844 : }
1845 :
1846 : // DownloadSpan is a key range passed to the Download method.
1847 : type DownloadSpan struct {
1848 : StartKey []byte
1849 : // EndKey is exclusive.
1850 : EndKey []byte
1851 : }
1852 :
1853 : // Download ensures that the LSM does not use any external sstables for the
1854 : // given key ranges. It does so by performing appropriate compactions so that
1855 : // all external data becomes available locally.
1856 : //
1857 : // Note that calling this method does not imply that all other compactions stop;
1858 : // it simply informs Pebble of a list of spans for which external data should be
1859 : // downloaded with high priority.
1860 : //
1861 : // The method returns once no external sstables overlap the given spans, the
1862 : // context is canceled, or an error is hit.
1863 : //
1864 : // TODO(radu): consider passing a priority/impact knob to express how important
1865 : // the download is (versus live traffic performance, LSM health).
1866 0 : func (d *DB) Download(ctx context.Context, spans []DownloadSpan) error {
1867 0 : return errors.Errorf("not implemented")
1868 0 : }
1869 :
1870 : // Flush the memtable to stable storage.
1871 1 : func (d *DB) Flush() error {
1872 1 : flushDone, err := d.AsyncFlush()
1873 1 : if err != nil {
1874 0 : return err
1875 0 : }
1876 1 : <-flushDone
1877 1 : return nil
1878 : }
1879 :
1880 : // AsyncFlush asynchronously flushes the memtable to stable storage.
1881 : //
1882 : // If no error is returned, the caller can receive from the returned channel in
1883 : // order to wait for the flush to complete.
1884 1 : func (d *DB) AsyncFlush() (<-chan struct{}, error) {
1885 1 : if err := d.closed.Load(); err != nil {
1886 0 : panic(err)
1887 : }
1888 1 : if d.opts.ReadOnly {
1889 0 : return nil, ErrReadOnly
1890 0 : }
1891 :
1892 1 : d.commit.mu.Lock()
1893 1 : defer d.commit.mu.Unlock()
1894 1 : d.mu.Lock()
1895 1 : defer d.mu.Unlock()
1896 1 : flushed := d.mu.mem.queue[len(d.mu.mem.queue)-1].flushed
1897 1 : err := d.makeRoomForWrite(nil)
1898 1 : if err != nil {
1899 0 : return nil, err
1900 0 : }
1901 1 : return flushed, nil
1902 : }
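// A usage sketch (illustrative): start a flush without blocking and wait for
// it later; receiving from the channel is exactly what Flush does above:
//
//	done, err := d.AsyncFlush()
//	if err != nil {
//		return err
//	}
//	// ... other work ...
//	<-done // the memtable contents have been written to L0 sstables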
1903 :
1904 : // Metrics returns metrics about the database.
1905 1 : func (d *DB) Metrics() *Metrics {
1906 1 : metrics := &Metrics{}
1907 1 : recycledLogsCount, recycledLogSize := d.logRecycler.stats()
1908 1 :
1909 1 : d.mu.Lock()
1910 1 : vers := d.mu.versions.currentVersion()
1911 1 : *metrics = d.mu.versions.metrics
1912 1 : metrics.Compact.EstimatedDebt = d.mu.versions.picker.estimatedCompactionDebt(0)
1913 1 : metrics.Compact.InProgressBytes = d.mu.versions.atomicInProgressBytes.Load()
1914 1 : metrics.Compact.NumInProgress = int64(d.mu.compact.compactingCount)
1915 1 : metrics.Compact.MarkedFiles = vers.Stats.MarkedForCompaction
1916 1 : metrics.Compact.Duration = d.mu.compact.duration
1917 1 : for c := range d.mu.compact.inProgress {
1918 0 : if c.kind != compactionKindFlush {
1919 0 : metrics.Compact.Duration += d.timeNow().Sub(c.beganAt)
1920 0 : }
1921 : }
1922 :
1923 1 : for _, m := range d.mu.mem.queue {
1924 1 : metrics.MemTable.Size += m.totalBytes()
1925 1 : }
1926 1 : metrics.Snapshots.Count = d.mu.snapshots.count()
1927 1 : if metrics.Snapshots.Count > 0 {
1928 0 : metrics.Snapshots.EarliestSeqNum = d.mu.snapshots.earliest()
1929 0 : }
1930 1 : metrics.Snapshots.PinnedKeys = d.mu.snapshots.cumulativePinnedCount
1931 1 : metrics.Snapshots.PinnedSize = d.mu.snapshots.cumulativePinnedSize
1932 1 : metrics.MemTable.Count = int64(len(d.mu.mem.queue))
1933 1 : metrics.MemTable.ZombieCount = d.memTableCount.Load() - metrics.MemTable.Count
1934 1 : metrics.MemTable.ZombieSize = uint64(d.memTableReserved.Load()) - metrics.MemTable.Size
1935 1 : metrics.WAL.ObsoleteFiles = int64(recycledLogsCount)
1936 1 : metrics.WAL.ObsoletePhysicalSize = recycledLogSize
1937 1 : metrics.WAL.Size = d.logSize.Load()
1938 1 : // The current WAL size (d.logSize) is the current logical size,
1939 1 : // which may be less than the WAL's physical size if it was recycled.
1940 1 : // The file sizes in d.mu.log.queue are updated to the physical size
1941 1 : // during WAL rotation. Use the larger of the two for the current WAL. All
1942 1 : // the previous WALs' fileSizes in d.mu.log.queue are already updated.
1943 1 : metrics.WAL.PhysicalSize = metrics.WAL.Size
1944 1 : if len(d.mu.log.queue) > 0 && metrics.WAL.PhysicalSize < d.mu.log.queue[len(d.mu.log.queue)-1].fileSize {
1945 0 : metrics.WAL.PhysicalSize = d.mu.log.queue[len(d.mu.log.queue)-1].fileSize
1946 0 : }
1947 1 : for i, n := 0, len(d.mu.log.queue)-1; i < n; i++ {
1948 1 : metrics.WAL.PhysicalSize += d.mu.log.queue[i].fileSize
1949 1 : }
1950 :
1951 1 : metrics.WAL.BytesIn = d.mu.log.bytesIn // protected by d.mu
1952 1 : for i, n := 0, len(d.mu.mem.queue)-1; i < n; i++ {
1953 1 : metrics.WAL.Size += d.mu.mem.queue[i].logSize
1954 1 : }
1955 1 : metrics.WAL.BytesWritten = metrics.Levels[0].BytesIn + metrics.WAL.Size
1956 1 : if p := d.mu.versions.picker; p != nil {
1957 1 : compactions := d.getInProgressCompactionInfoLocked(nil)
1958 1 : for level, score := range p.getScores(compactions) {
1959 1 : metrics.Levels[level].Score = score
1960 1 : }
1961 : }
1962 1 : metrics.Table.ZombieCount = int64(len(d.mu.versions.zombieTables))
1963 1 : for _, size := range d.mu.versions.zombieTables {
1964 0 : metrics.Table.ZombieSize += size
1965 0 : }
1966 1 : metrics.private.optionsFileSize = d.optionsFileSize
1967 1 :
1968 1 : // TODO(jackson): Consider making these metrics optional.
1969 1 : metrics.Keys.RangeKeySetsCount = countRangeKeySetFragments(vers)
1970 1 : metrics.Keys.TombstoneCount = countTombstones(vers)
1971 1 :
1972 1 : d.mu.versions.logLock()
1973 1 : metrics.private.manifestFileSize = uint64(d.mu.versions.manifest.Size())
1974 1 : d.mu.versions.logUnlock()
1975 1 :
1976 1 : metrics.LogWriter.FsyncLatency = d.mu.log.metrics.fsyncLatency
1977 1 : if err := metrics.LogWriter.Merge(&d.mu.log.metrics.LogWriterMetrics); err != nil {
1978 0 : d.opts.Logger.Infof("metrics error: %s", err)
1979 0 : }
1980 1 : metrics.Flush.WriteThroughput = d.mu.compact.flushWriteThroughput
1981 1 : if d.mu.compact.flushing {
1982 0 : metrics.Flush.NumInProgress = 1
1983 0 : }
1984 1 : for i := 0; i < numLevels; i++ {
1985 1 : metrics.Levels[i].Additional.ValueBlocksSize = valueBlocksSizeForLevel(vers, i)
1986 1 : }
1987 :
1988 1 : d.mu.Unlock()
1989 1 :
1990 1 : metrics.BlockCache = d.opts.Cache.Metrics()
1991 1 : metrics.TableCache, metrics.Filter = d.tableCache.metrics()
1992 1 : metrics.TableIters = int64(d.tableCache.iterCount())
1993 1 :
1994 1 : metrics.SecondaryCacheMetrics = d.objProvider.Metrics()
1995 1 :
1996 1 : metrics.Uptime = d.timeNow().Sub(d.openedAt)
1997 1 :
1998 1 : return metrics
1999 : }
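// A usage sketch (illustrative; field names taken from the assignments
// above; assumes import "fmt"):
//
//	m := d.Metrics()
//	fmt.Printf("memtables: %d (%d bytes); WAL logical size: %d bytes\n",
//		m.MemTable.Count, m.MemTable.Size, m.WAL.Size)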
2000 :
2001 : // sstablesOptions hold the optional parameters to retrieve TableInfo for all sstables.
2002 : type sstablesOptions struct {
2003 : // set to true to return the sstable properties in TableInfo
2004 : withProperties bool
2005 :
2006 : // if set, return sstables that overlap the key range (end-exclusive)
2007 : start []byte
2008 : end []byte
2009 :
2010 : withApproximateSpanBytes bool
2011 : }
2012 :
2013 : // SSTablesOption sets an optional parameter used by `DB.SSTables`.
2014 : type SSTablesOption func(*sstablesOptions)
2015 :
2016 : // WithProperties enables returning sstable properties in each TableInfo.
2017 : //
2018 : // NOTE: if most of the sstable properties need to be read from disk,
2019 : // this option may make the `SSTables` method quite slow.
2020 0 : func WithProperties() SSTablesOption {
2021 0 : return func(opt *sstablesOptions) {
2022 0 : opt.withProperties = true
2023 0 : }
2024 : }
2025 :
2026 : // WithKeyRangeFilter ensures returned sstables overlap start and end
2027 : // (end-exclusive). If start and end are both nil, this filter has no effect.
2028 0 : func WithKeyRangeFilter(start, end []byte) SSTablesOption {
2029 0 : return func(opt *sstablesOptions) {
2030 0 : opt.end = end
2031 0 : opt.start = start
2032 0 : }
2033 : }
2034 :
2035 : // WithApproximateSpanBytes enables capturing the approximate number of bytes that
2036 : // overlap the provided key span for each sstable.
2037 : // NOTE: this option can only be used when WithKeyRangeFilter and
2038 : // WithProperties are also provided.
2039 0 : func WithApproximateSpanBytes() SSTablesOption {
2040 0 : return func(opt *sstablesOptions) {
2041 0 : opt.withApproximateSpanBytes = true
2042 0 : }
2043 : }
2044 :
2045 : // BackingType denotes the type of storage backing a given sstable.
2046 : type BackingType int
2047 :
2048 : const (
2049 : // BackingTypeLocal denotes an sstable stored on local disk according to the
2050 : // objprovider. This file is completely owned by us.
2051 : BackingTypeLocal BackingType = iota
2052 : // BackingTypeShared denotes an sstable stored on shared storage, created
2053 : // by this Pebble instance and possibly shared by other Pebble instances.
2054 : // These types of files have lifecycle managed by Pebble.
2055 : BackingTypeShared
2056 : // BackingTypeSharedForeign denotes an sstable stored on shared storage,
2057 : // created by a Pebble instance other than this one. These types of files have
2058 : // lifecycle managed by Pebble.
2059 : BackingTypeSharedForeign
2060 : // BackingTypeExternal denotes an sstable stored on external storage,
2061 : // not owned by any Pebble instance and with no refcounting/cleanup methods
2062 : // or lifecycle management. An example of an external file is a file restored
2063 : // from a backup.
2064 : BackingTypeExternal
2065 : )
2066 :
2067 : // SSTableInfo exports manifest.TableInfo with sstable.Properties alongside
2068 : // other file backing info.
2069 : type SSTableInfo struct {
2070 : manifest.TableInfo
2071 : // Virtual indicates whether the sstable is virtual.
2072 : Virtual bool
2073 : // BackingSSTNum is the file number of the backing sstable that backs the
2074 : // sstable associated with this SSTableInfo. If Virtual is false,
2075 : // then BackingSSTNum == FileNum.
2076 : BackingSSTNum base.FileNum
2077 : // BackingType is the type of storage backing this sstable.
2078 : BackingType BackingType
2079 : // Locator is the remote.Locator backing this sstable, if the backing type is
2080 : // not BackingTypeLocal.
2081 : Locator remote.Locator
2082 :
2083 : // Properties is the sstable properties of this table. If Virtual is true,
2084 : // then the Properties are associated with the backing sst.
2085 : Properties *sstable.Properties
2086 : }
2087 :
2088 : // SSTables retrieves the current sstables. The returned slice is indexed by
2089 : // level and each level is indexed by the position of the sstable within the
2090 : // level. Note that this information may be out of date due to concurrent
2091 : // flushes and compactions.
2092 0 : func (d *DB) SSTables(opts ...SSTablesOption) ([][]SSTableInfo, error) {
2093 0 : opt := &sstablesOptions{}
2094 0 : for _, fn := range opts {
2095 0 : fn(opt)
2096 0 : }
2097 :
2098 0 : if opt.withApproximateSpanBytes && !opt.withProperties {
2099 0 : return nil, errors.Errorf("Cannot use WithApproximateSpanBytes without WithProperties option.")
2100 0 : }
2101 0 : if opt.withApproximateSpanBytes && (opt.start == nil || opt.end == nil) {
2102 0 : return nil, errors.Errorf("Cannot use WithApproximateSpanBytes without WithKeyRangeFilter option.")
2103 0 : }
2104 :
2105 : // Grab and reference the current readState.
2106 0 : readState := d.loadReadState()
2107 0 : defer readState.unref()
2108 0 :
2109 0 : // TODO(peter): This is somewhat expensive, especially on a large
2110 0 : // database. It might be worthwhile to unify TableInfo and FileMetadata and
2111 0 : // then we could simply return current.Files. Note that RocksDB is doing
2112 0 : // something similar to the current code, so perhaps it isn't too bad.
2113 0 : srcLevels := readState.current.Levels
2114 0 : var totalTables int
2115 0 : for i := range srcLevels {
2116 0 : totalTables += srcLevels[i].Len()
2117 0 : }
2118 :
2119 0 : destTables := make([]SSTableInfo, totalTables)
2120 0 : destLevels := make([][]SSTableInfo, len(srcLevels))
2121 0 : for i := range destLevels {
2122 0 : iter := srcLevels[i].Iter()
2123 0 : j := 0
2124 0 : for m := iter.First(); m != nil; m = iter.Next() {
2125 0 : if opt.start != nil && opt.end != nil && !m.Overlaps(d.opts.Comparer.Compare, opt.start, opt.end, true /* exclusive end */) {
2126 0 : continue
2127 : }
2128 0 : destTables[j] = SSTableInfo{TableInfo: m.TableInfo()}
2129 0 : if opt.withProperties {
2130 0 : p, err := d.tableCache.getTableProperties(
2131 0 : m,
2132 0 : )
2133 0 : if err != nil {
2134 0 : return nil, err
2135 0 : }
2136 0 : destTables[j].Properties = p
2137 : }
2138 0 : destTables[j].Virtual = m.Virtual
2139 0 : destTables[j].BackingSSTNum = m.FileBacking.DiskFileNum.FileNum()
2140 0 : objMeta, err := d.objProvider.Lookup(fileTypeTable, m.FileBacking.DiskFileNum)
2141 0 : if err != nil {
2142 0 : return nil, err
2143 0 : }
2144 0 : if objMeta.IsRemote() {
2145 0 : if objMeta.IsShared() {
2146 0 : if d.objProvider.IsSharedForeign(objMeta) {
2147 0 : destTables[j].BackingType = BackingTypeSharedForeign
2148 0 : } else {
2149 0 : destTables[j].BackingType = BackingTypeShared
2150 0 : }
2151 0 : } else {
2152 0 : destTables[j].BackingType = BackingTypeExternal
2153 0 : }
2154 0 : destTables[j].Locator = objMeta.Remote.Locator
2155 0 : } else {
2156 0 : destTables[j].BackingType = BackingTypeLocal
2157 0 : }
2158 :
2159 0 : if opt.withApproximateSpanBytes {
2160 0 : var spanBytes uint64
2161 0 : if m.ContainedWithinSpan(d.opts.Comparer.Compare, opt.start, opt.end) {
2162 0 : spanBytes = m.Size
2163 0 : } else {
2164 0 : size, err := d.tableCache.estimateSize(m, opt.start, opt.end)
2165 0 : if err != nil {
2166 0 : return nil, err
2167 0 : }
2168 0 : spanBytes = size
2169 : }
2170 0 : propertiesCopy := *destTables[j].Properties
2171 0 :
2172 0 : // Deep copy user properties so approximate span bytes can be added.
2173 0 : propertiesCopy.UserProperties = make(map[string]string, len(destTables[j].Properties.UserProperties)+1)
2174 0 : for k, v := range destTables[j].Properties.UserProperties {
2175 0 : propertiesCopy.UserProperties[k] = v
2176 0 : }
2177 0 : propertiesCopy.UserProperties["approximate-span-bytes"] = strconv.FormatUint(spanBytes, 10)
2178 0 : destTables[j].Properties = &propertiesCopy
2179 : }
2180 0 : j++
2181 : }
2182 0 : destLevels[i] = destTables[:j]
2183 0 : destTables = destTables[j:]
2184 : }
2185 :
2186 0 : return destLevels, nil
2187 : }
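// A usage sketch (illustrative). Per the validation above,
// WithApproximateSpanBytes requires WithKeyRangeFilter and WithProperties:
//
//	levels, err := d.SSTables(
//		pebble.WithKeyRangeFilter([]byte("a"), []byte("z")),
//		pebble.WithProperties(),
//		pebble.WithApproximateSpanBytes(),
//	)
//	if err != nil {
//		return err
//	}
//	for level, tables := range levels {
//		for _, t := range tables {
//			fmt.Println(level, t.FileNum,
//				t.Properties.UserProperties["approximate-span-bytes"])
//		}
//	}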
2188 :
2189 : // EstimateDiskUsage returns the estimated filesystem space used in bytes for
2190 : // storing the range `[start, end]`. The estimation is computed as follows:
2191 : //
2192 : // - For sstables fully contained in the range the whole file size is included.
2193 : // - For sstables partially contained in the range the overlapping data block sizes
2194 : // are included. Even if a data block partially overlaps, or we cannot determine
2195 : // overlap due to abbreviated index keys, the full data block size is included in
2196 : // the estimation. Note that unlike fully contained sstables, none of the
2197 : // meta-block space is counted for partially overlapped files.
2198 : // - For virtual sstables, we use the overlap between start, end and the virtual
2199 : // sstable bounds to determine disk usage.
2200 : // - There may also exist WAL entries for unflushed keys in this range. This
2201 : // estimation currently excludes space used for the range in the WAL.
2202 0 : func (d *DB) EstimateDiskUsage(start, end []byte) (uint64, error) {
2203 0 : bytes, _, _, err := d.EstimateDiskUsageByBackingType(start, end)
2204 0 : return bytes, err
2205 0 : }
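// A usage sketch (illustrative; assumes import "fmt"):
//
//	usage, err := d.EstimateDiskUsage([]byte("a"), []byte("z"))
//	if err != nil {
//		return err
//	}
//	fmt.Printf("~%d bytes on disk for [a, z]\n", usage)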
2206 :
2207 : // EstimateDiskUsageByBackingType is like EstimateDiskUsage but additionally
2208 : // returns the subsets of that size in remote and external files.
2209 : func (d *DB) EstimateDiskUsageByBackingType(
2210 : start, end []byte,
2211 0 : ) (totalSize, remoteSize, externalSize uint64, _ error) {
2212 0 : if err := d.closed.Load(); err != nil {
2213 0 : panic(err)
2214 : }
2215 0 : if d.opts.Comparer.Compare(start, end) > 0 {
2216 0 : return 0, 0, 0, errors.New("invalid key-range specified (start > end)")
2217 0 : }
2218 :
2219 : // Grab and reference the current readState. This prevents the underlying
2220 : // files in the associated version from being deleted if there is a concurrent
2221 : // compaction.
2222 0 : readState := d.loadReadState()
2223 0 : defer readState.unref()
2224 0 :
2225 0 : for level, files := range readState.current.Levels {
2226 0 : iter := files.Iter()
2227 0 : if level > 0 {
2228 0 : // We can only use `Overlaps` to restrict `files` at L1+ since at L0 it
2229 0 : // expands the range iteratively until it has found a set of files that
2230 0 : // do not overlap any other L0 files outside that set.
2231 0 : overlaps := readState.current.Overlaps(level, d.opts.Comparer.Compare, start, end, false /* exclusiveEnd */)
2232 0 : iter = overlaps.Iter()
2233 0 : }
2234 0 : for file := iter.First(); file != nil; file = iter.Next() {
2235 0 : if d.opts.Comparer.Compare(start, file.Smallest.UserKey) <= 0 &&
2236 0 : d.opts.Comparer.Compare(file.Largest.UserKey, end) <= 0 {
2237 0 : // The range fully contains the file, so skip looking it up in
2238 0 : // table cache/looking at its indexes, and add the full file size.
2239 0 : meta, err := d.objProvider.Lookup(fileTypeTable, file.FileBacking.DiskFileNum)
2240 0 : if err != nil {
2241 0 : return 0, 0, 0, err
2242 0 : }
2243 0 : if meta.IsRemote() {
2244 0 : remoteSize += file.Size
2245 0 : if meta.Remote.CleanupMethod == objstorage.SharedNoCleanup {
2246 0 : externalSize += file.Size
2247 0 : }
2248 : }
2249 0 : totalSize += file.Size
2250 0 : } else if d.opts.Comparer.Compare(file.Smallest.UserKey, end) <= 0 &&
2251 0 : d.opts.Comparer.Compare(start, file.Largest.UserKey) <= 0 {
2252 0 : var size uint64
2253 0 : var err error
2254 0 : if file.Virtual {
2255 0 : err = d.tableCache.withVirtualReader(
2256 0 : file.VirtualMeta(),
2257 0 : func(r sstable.VirtualReader) (err error) {
2258 0 : size, err = r.EstimateDiskUsage(start, end)
2259 0 : return err
2260 0 : },
2261 : )
2262 0 : } else {
2263 0 : err = d.tableCache.withReader(
2264 0 : file.PhysicalMeta(),
2265 0 : func(r *sstable.Reader) (err error) {
2266 0 : size, err = r.EstimateDiskUsage(start, end)
2267 0 : return err
2268 0 : },
2269 : )
2270 : }
2271 0 : if err != nil {
2272 0 : return 0, 0, 0, err
2273 0 : }
2274 0 : meta, err := d.objProvider.Lookup(fileTypeTable, file.FileBacking.DiskFileNum)
2275 0 : if err != nil {
2276 0 : return 0, 0, 0, err
2277 0 : }
2278 0 : if meta.IsRemote() {
2279 0 : remoteSize += size
2280 0 : if meta.Remote.CleanupMethod == objstorage.SharedNoCleanup {
2281 0 : externalSize += size
2282 0 : }
2283 : }
2284 0 : totalSize += size
2285 : }
2286 : }
2287 : }
2288 0 : return totalSize, remoteSize, externalSize, nil
2289 : }
2290 :
2291 1 : func (d *DB) walPreallocateSize() int {
2292 1 : // Set the WAL preallocate size to 110% of the memtable size. Note that there
2293 1 : // is a bit of apples and oranges in units here as the memtable size
2294 1 : // corresponds to the memory usage of the memtable while the WAL size is the
2295 1 : // size of the batches (plus overhead) stored in the WAL.
2296 1 : //
2297 1 : // TODO(peter): 110% of the memtable size is quite hefty for a block
2298 1 : // size. This logic is taken from GetWalPreallocateBlockSize in
2299 1 : // RocksDB. Could a smaller preallocation block size be used?
2300 1 : size := d.opts.MemTableSize
2301 1 : size = (size / 10) + size
2302 1 : return int(size)
2303 1 : }
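// Worked example of the computation above (illustrative; 64 MiB matches the
// allocation-cost note in newMemTable below, not necessarily the default):
// size = 64<<20 = 67108864 bytes, so the preallocation is
// 67108864/10 + 67108864 = 73819750 bytes, roughly 70.4 MiB, i.e. 110% of
// the memtable size.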
2304 :
2305 1 : func (d *DB) newMemTable(logNum FileNum, logSeqNum uint64) (*memTable, *flushableEntry) {
2306 1 : size := d.mu.mem.nextSize
2307 1 : if d.mu.mem.nextSize < d.opts.MemTableSize {
2308 1 : d.mu.mem.nextSize *= 2
2309 1 : if d.mu.mem.nextSize > d.opts.MemTableSize {
2310 0 : d.mu.mem.nextSize = d.opts.MemTableSize
2311 0 : }
2312 : }
2313 :
2314 1 : memtblOpts := memTableOptions{
2315 1 : Options: d.opts,
2316 1 : logSeqNum: logSeqNum,
2317 1 : }
2318 1 :
2319 1 : // Before attempting to allocate a new memtable, check if there's one
2320 1 : // available for recycling in memTableRecycle. Large contiguous allocations
2321 1 : // can be costly as fragmentation makes it more difficult to find a large
2322 1 : // contiguous free space. We've observed 64MB allocations taking 10ms+.
2323 1 : //
2324 1 : // To reduce these costly allocations, up to 1 obsolete memtable is stashed
2325 1 : // in `d.memTableRecycle` to allow a future memtable rotation to reuse
2326 1 : // existing memory.
2327 1 : var mem *memTable
2328 1 : mem = d.memTableRecycle.Swap(nil)
2329 1 : if mem != nil && uint64(len(mem.arenaBuf)) != size {
2330 1 : d.freeMemTable(mem)
2331 1 : mem = nil
2332 1 : }
2333 1 : if mem != nil {
2334 1 : // Carry through the existing buffer and memory reservation.
2335 1 : memtblOpts.arenaBuf = mem.arenaBuf
2336 1 : memtblOpts.releaseAccountingReservation = mem.releaseAccountingReservation
2337 1 : } else {
2338 1 : mem = new(memTable)
2339 1 : memtblOpts.arenaBuf = manual.New(int(size))
2340 1 : memtblOpts.releaseAccountingReservation = d.opts.Cache.Reserve(int(size))
2341 1 : d.memTableCount.Add(1)
2342 1 : d.memTableReserved.Add(int64(size))
2343 1 :
2344 1 : // Note: this is a no-op if invariants are disabled or race is enabled.
2345 1 : invariants.SetFinalizer(mem, checkMemTable)
2346 1 : }
2347 1 : mem.init(memtblOpts)
2348 1 :
2349 1 : entry := d.newFlushableEntry(mem, logNum, logSeqNum)
2350 1 : entry.releaseMemAccounting = func() {
2351 1 : // If the user leaks iterators, we may be releasing the memtable after
2352 1 : // the DB is already closed. In this case, we want to just release the
2353 1 : // memory because DB.Close won't come along to free it for us.
2354 1 : if err := d.closed.Load(); err != nil {
2355 1 : d.freeMemTable(mem)
2356 1 : return
2357 1 : }
2358 :
2359 : // The next memtable allocation might be able to reuse this memtable.
2360 : // Stash it on d.memTableRecycle.
2361 1 : if unusedMem := d.memTableRecycle.Swap(mem); unusedMem != nil {
2362 1 : // There was already a memtable waiting to be recycled. We're now
2363 1 : // responsible for freeing it.
2364 1 : d.freeMemTable(unusedMem)
2365 1 : }
2366 : }
2367 1 : return mem, entry
2368 : }
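// A minimal sketch of the single-slot recycling pattern used above, with a
// plain byte slice standing in for the memtable and hypothetical helper
// names (assumes Go 1.19+ "sync/atomic"):
//
//	var slot atomic.Pointer[[]byte] // analogous to d.memTableRecycle
//
//	func getBuf(size int) []byte {
//		if p := slot.Swap(nil); p != nil && len(*p) == size {
//			return *p // reuse the stashed allocation
//		}
//		return make([]byte, size) // otherwise allocate afresh
//	}
//
//	func putBuf(b []byte) {
//		if prev := slot.Swap(&b); prev != nil {
//			_ = prev // only one buffer is stashed; Pebble frees the displaced one
//		}
//	}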
2369 :
2370 1 : func (d *DB) freeMemTable(m *memTable) {
2371 1 : d.memTableCount.Add(-1)
2372 1 : d.memTableReserved.Add(-int64(len(m.arenaBuf)))
2373 1 : m.free()
2374 1 : }
2375 :
2376 1 : func (d *DB) newFlushableEntry(f flushable, logNum FileNum, logSeqNum uint64) *flushableEntry {
2377 1 : fe := &flushableEntry{
2378 1 : flushable: f,
2379 1 : flushed: make(chan struct{}),
2380 1 : logNum: logNum,
2381 1 : logSeqNum: logSeqNum,
2382 1 : deleteFn: d.mu.versions.addObsolete,
2383 1 : deleteFnLocked: d.mu.versions.addObsoleteLocked,
2384 1 : }
2385 1 : fe.readerRefs.Store(1)
2386 1 : return fe
2387 1 : }
2388 :
2389 : // makeRoomForWrite ensures that the memtable has room to hold the contents of
2390 : // Batch. It reserves the space in the memtable and adds a reference to the
2391 : // memtable. The caller must later ensure that the memtable is unreferenced. If
2392 : // the memtable is full, or a nil Batch is provided, the current memtable is
2393 : // rotated (marked as immutable) and a new mutable memtable is allocated. This
2394 : // memtable rotation also causes a log rotation.
2395 : //
2396 : // Both DB.mu and commitPipeline.mu must be held by the caller. Note that DB.mu
2397 : // may be released and reacquired.
2398 1 : func (d *DB) makeRoomForWrite(b *Batch) error {
2399 1 : if b != nil && b.ingestedSSTBatch {
2400 0 : panic("pebble: invalid function call")
2401 : }
2402 :
2403 1 : force := b == nil || b.flushable != nil
2404 1 : stalled := false
2405 1 : for {
2406 1 : if b != nil && b.flushable == nil {
2407 1 : err := d.mu.mem.mutable.prepare(b)
2408 1 : if err != arenaskl.ErrArenaFull {
2409 1 : if stalled {
2410 1 : d.opts.EventListener.WriteStallEnd()
2411 1 : }
2412 1 : return err
2413 : }
2414 1 : } else if !force {
2415 1 : if stalled {
2416 1 : d.opts.EventListener.WriteStallEnd()
2417 1 : }
2418 1 : return nil
2419 : }
2420 : // force || err == ErrArenaFull, so we need to rotate the current memtable.
2421 1 : {
2422 1 : var size uint64
2423 1 : for i := range d.mu.mem.queue {
2424 1 : size += d.mu.mem.queue[i].totalBytes()
2425 1 : }
2426 1 : if size >= uint64(d.opts.MemTableStopWritesThreshold)*d.opts.MemTableSize {
2427 1 : // We have filled up the current memtable, but already queued memtables
2428 1 : // are still flushing, so we wait.
2429 1 : if !stalled {
2430 1 : stalled = true
2431 1 : d.opts.EventListener.WriteStallBegin(WriteStallBeginInfo{
2432 1 : Reason: "memtable count limit reached",
2433 1 : })
2434 1 : }
2435 1 : now := time.Now()
2436 1 : d.mu.compact.cond.Wait()
2437 1 : if b != nil {
2438 1 : b.commitStats.MemTableWriteStallDuration += time.Since(now)
2439 1 : }
2440 1 : continue
2441 : }
2442 : }
2443 1 : l0ReadAmp := d.mu.versions.currentVersion().L0Sublevels.ReadAmplification()
2444 1 : if l0ReadAmp >= d.opts.L0StopWritesThreshold {
2445 1 : // There are too many level-0 files, so we wait.
2446 1 : if !stalled {
2447 1 : stalled = true
2448 1 : d.opts.EventListener.WriteStallBegin(WriteStallBeginInfo{
2449 1 : Reason: "L0 file count limit exceeded",
2450 1 : })
2451 1 : }
2452 1 : now := time.Now()
2453 1 : d.mu.compact.cond.Wait()
2454 1 : if b != nil {
2455 1 : b.commitStats.L0ReadAmpWriteStallDuration += time.Since(now)
2456 1 : }
2457 1 : continue
2458 : }
2459 :
2460 1 : var newLogNum base.FileNum
2461 1 : var prevLogSize uint64
2462 1 : if !d.opts.DisableWAL {
2463 1 : now := time.Now()
2464 1 : newLogNum, prevLogSize = d.recycleWAL()
2465 1 : if b != nil {
2466 1 : b.commitStats.WALRotationDuration += time.Since(now)
2467 1 : }
2468 : }
2469 :
2470 1 : immMem := d.mu.mem.mutable
2471 1 : imm := d.mu.mem.queue[len(d.mu.mem.queue)-1]
2472 1 : imm.logSize = prevLogSize
2473 1 : imm.flushForced = imm.flushForced || (b == nil)
2474 1 :
2475 1 : // If we are manually flushing and we used less than half of the bytes in
2476 1 : // the memtable, don't increase the size for the next memtable. This
2477 1 : // reduces memtable memory pressure when an application is frequently
2478 1 : // manually flushing.
2479 1 : if (b == nil) && uint64(immMem.availBytes()) > immMem.totalBytes()/2 {
2480 1 : d.mu.mem.nextSize = immMem.totalBytes()
2481 1 : }
2482 :
2483 1 : if b != nil && b.flushable != nil {
2484 1 : // The batch is too large to fit in the memtable so add it directly to
2485 1 : // the immutable queue. The flushable batch is associated with the same
2486 1 : // log as the immutable memtable, but logically occurs after it in
2487 1 : // seqnum space. We ensure while flushing that the flushable batch
2488 1 : // is flushed along with the previous memtable in the flushable
2489 1 : // queue. See the top level comment in DB.flush1 to learn how this
2490 1 : // is ensured.
2491 1 : //
2492 1 : // See DB.commitWrite for the special handling of log writes for large
2493 1 : // batches. In particular, the large batch has already written to
2494 1 : // imm.logNum.
2495 1 : entry := d.newFlushableEntry(b.flushable, imm.logNum, b.SeqNum())
2496 1 : // The large batch is by definition large. Reserve space from the cache
2497 1 : // for it until it is flushed.
2498 1 : entry.releaseMemAccounting = d.opts.Cache.Reserve(int(b.flushable.totalBytes()))
2499 1 : d.mu.mem.queue = append(d.mu.mem.queue, entry)
2500 1 : }
2501 :
2502 1 : var logSeqNum uint64
2503 1 : if b != nil {
2504 1 : logSeqNum = b.SeqNum()
2505 1 : if b.flushable != nil {
2506 1 : logSeqNum += uint64(b.Count())
2507 1 : }
2508 1 : } else {
2509 1 : logSeqNum = d.mu.versions.logSeqNum.Load()
2510 1 : }
2511 1 : d.rotateMemtable(newLogNum, logSeqNum, immMem)
2512 1 : force = false
2513 : }
2514 : }
2515 :
2516 : // Both DB.mu and commitPipeline.mu must be held by the caller.
2517 1 : func (d *DB) rotateMemtable(newLogNum FileNum, logSeqNum uint64, prev *memTable) {
2518 1 : // Create a new memtable, scheduling the previous one for flushing. We do
2519 1 : // this even if the previous memtable was empty because the DB.Flush
2520 1 : // mechanism is dependent on being able to wait for the empty memtable to
2521 1 : // flush. We can't just mark the empty memtable as flushed here because we
2522 1 : // also have to wait for all previous immutable tables to
2523 1 : // flush. Additionally, the memtable is tied to a particular WAL file and we
2524 1 : // want to go through the flush path in order to recycle that WAL file.
2525 1 : //
2526 1 : // NB: newLogNum corresponds to the WAL that contains mutations that are
2527 1 : // present in the new memtable. When immutable memtables are flushed to
2528 1 : // disk, a VersionEdit will be created telling the manifest the minimum
2529 1 : // unflushed log number (which will be the next one in d.mu.mem.mutable
2530 1 : // that was not flushed).
2531 1 : //
2532 1 : // NB: prev should be the current mutable memtable.
2533 1 : var entry *flushableEntry
2534 1 : d.mu.mem.mutable, entry = d.newMemTable(newLogNum, logSeqNum)
2535 1 : d.mu.mem.queue = append(d.mu.mem.queue, entry)
2536 1 : d.updateReadStateLocked(nil)
2537 1 : if prev.writerUnref() {
2538 1 : d.maybeScheduleFlush()
2539 1 : }
2540 : }
2541 :
2542 : // Both DB.mu and commitPipeline.mu must be held by the caller. Note that DB.mu
2543 : // may be released and reacquired.
2544 1 : func (d *DB) recycleWAL() (newLogNum FileNum, prevLogSize uint64) {
2545 1 : if d.opts.DisableWAL {
2546 0 : panic("pebble: invalid function call")
2547 : }
2548 :
2549 1 : jobID := d.mu.nextJobID
2550 1 : d.mu.nextJobID++
2551 1 : newLogNum = d.mu.versions.getNextFileNum()
2552 1 :
2553 1 : prevLogSize = uint64(d.mu.log.Size())
2554 1 :
2555 1 : // The previous log may have grown past its original physical
2556 1 : // size. Update its file size in the queue so we have a proper
2557 1 : // accounting of its file size.
2558 1 : if d.mu.log.queue[len(d.mu.log.queue)-1].fileSize < prevLogSize {
2559 1 : d.mu.log.queue[len(d.mu.log.queue)-1].fileSize = prevLogSize
2560 1 : }
2561 1 : d.mu.Unlock()
2562 1 :
2563 1 : var err error
2564 1 : // Close the previous log first. This writes an EOF trailer
2565 1 : // signifying the end of the file and syncs it to disk. We must
2566 1 : // close the previous log before linking the new log file,
2567 1 : // otherwise a crash could leave both logs with unclean tails, and
2568 1 : // Open will treat the previous log as corrupt.
2569 1 : err = d.mu.log.LogWriter.Close()
2570 1 : metrics := d.mu.log.LogWriter.Metrics()
2571 1 : d.mu.Lock()
2572 1 : if err := d.mu.log.metrics.Merge(metrics); err != nil {
2573 0 : d.opts.Logger.Infof("metrics error: %s", err)
2574 0 : }
2575 1 : d.mu.Unlock()
2576 1 :
2577 1 : newLogName := base.MakeFilepath(d.opts.FS, d.walDirname, fileTypeLog, newLogNum.DiskFileNum())
2578 1 :
2579 1 : // Try to use a recycled log file. Recycling log files is an important
2580 1 : // performance optimization as it is faster to sync a file that has
2581 1 : // already been written, than one which is being written for the first
2582 1 : // time. This is due to the need to sync file metadata when a file is
2583 1 : // being written for the first time. Note this is true even if file
2584 1 : // preallocation is performed (e.g. fallocate).
2585 1 : var recycleLog fileInfo
2586 1 : var recycleOK bool
2587 1 : var newLogFile vfs.File
2588 1 : if err == nil {
2589 1 : recycleLog, recycleOK = d.logRecycler.peek()
2590 1 : if recycleOK {
2591 0 : recycleLogName := base.MakeFilepath(d.opts.FS, d.walDirname, fileTypeLog, recycleLog.fileNum)
2592 0 : newLogFile, err = d.opts.FS.ReuseForWrite(recycleLogName, newLogName)
2593 0 : base.MustExist(d.opts.FS, newLogName, d.opts.Logger, err)
2594 1 : } else {
2595 1 : newLogFile, err = d.opts.FS.Create(newLogName)
2596 1 : base.MustExist(d.opts.FS, newLogName, d.opts.Logger, err)
2597 1 : }
2598 : }
2599 :
2600 1 : var newLogSize uint64
2601 1 : if err == nil && recycleOK {
2602 0 : // Figure out the recycled WAL size. This Stat is necessary
2603 0 : // because ReuseForWrite's contract allows for removing the
2604 0 : // old file and creating a new one. We don't know whether the
2605 0 : // WAL was actually recycled.
2606 0 : // TODO(jackson): Adding a boolean to the ReuseForWrite return
2607 0 : // value indicating whether or not the file was actually
2608 0 : // reused would allow us to skip the stat and use
2609 0 : // recycleLog.fileSize.
2610 0 : var finfo os.FileInfo
2611 0 : finfo, err = newLogFile.Stat()
2612 0 : if err == nil {
2613 0 : newLogSize = uint64(finfo.Size())
2614 0 : }
2615 : }
2616 :
2617 1 : if err == nil {
2618 1 : // TODO(peter): RocksDB delays sync of the parent directory until the
2619 1 : // first time the log is synced. Is that worthwhile?
2620 1 : err = d.walDir.Sync()
2621 1 : }
2622 :
2623 1 : if err != nil && newLogFile != nil {
2624 0 : newLogFile.Close()
2625 1 : } else if err == nil {
2626 1 : newLogFile = vfs.NewSyncingFile(newLogFile, vfs.SyncingFileOptions{
2627 1 : NoSyncOnClose: d.opts.NoSyncOnClose,
2628 1 : BytesPerSync: d.opts.WALBytesPerSync,
2629 1 : PreallocateSize: d.walPreallocateSize(),
2630 1 : })
2631 1 : }
2632 :
2633 1 : if recycleOK {
2634 0 : err = firstError(err, d.logRecycler.pop(recycleLog.fileNum.FileNum()))
2635 0 : }
2636 :
2637 1 : d.opts.EventListener.WALCreated(WALCreateInfo{
2638 1 : JobID: jobID,
2639 1 : Path: newLogName,
2640 1 : FileNum: newLogNum,
2641 1 : RecycledFileNum: recycleLog.fileNum.FileNum(),
2642 1 : Err: err,
2643 1 : })
2644 1 :
2645 1 : d.mu.Lock()
2646 1 :
2647 1 : d.mu.versions.metrics.WAL.Files++
2648 1 :
2649 1 : if err != nil {
2650 0 : // TODO(peter): avoid chewing through file numbers in a tight loop if there
2651 0 : // is an error here.
2652 0 : //
2653 0 : // What to do here? Stumbling on doesn't seem worthwhile. If we failed to
2654 0 : // close the previous log it is possible we lost a write.
2655 0 : panic(err)
2656 : }
2657 :
2658 1 : d.mu.log.queue = append(d.mu.log.queue, fileInfo{fileNum: newLogNum.DiskFileNum(), fileSize: newLogSize})
2659 1 : d.mu.log.LogWriter = record.NewLogWriter(newLogFile, newLogNum, record.LogWriterConfig{
2660 1 : WALFsyncLatency: d.mu.log.metrics.fsyncLatency,
2661 1 : WALMinSyncInterval: d.opts.WALMinSyncInterval,
2662 1 : QueueSemChan: d.commit.logSyncQSem,
2663 1 : })
2664 1 : if d.mu.log.registerLogWriterForTesting != nil {
2665 0 : d.mu.log.registerLogWriterForTesting(d.mu.log.LogWriter)
2666 0 : }
2667 :
2668 1 : return
2669 : }
2670 :
2671 1 : func (d *DB) getEarliestUnflushedSeqNumLocked() uint64 {
2672 1 : seqNum := InternalKeySeqNumMax
2673 1 : for i := range d.mu.mem.queue {
2674 1 : logSeqNum := d.mu.mem.queue[i].logSeqNum
2675 1 : if seqNum > logSeqNum {
2676 1 : seqNum = logSeqNum
2677 1 : }
2678 : }
2679 1 : return seqNum
2680 : }
2681 :
2682 1 : func (d *DB) getInProgressCompactionInfoLocked(finishing *compaction) (rv []compactionInfo) {
2683 1 : for c := range d.mu.compact.inProgress {
2684 1 : if len(c.flushing) == 0 && (finishing == nil || c != finishing) {
2685 1 : info := compactionInfo{
2686 1 : versionEditApplied: c.versionEditApplied,
2687 1 : inputs: c.inputs,
2688 1 : smallest: c.smallest,
2689 1 : largest: c.largest,
2690 1 : outputLevel: -1,
2691 1 : }
2692 1 : if c.outputLevel != nil {
2693 1 : info.outputLevel = c.outputLevel.level
2694 1 : }
2695 1 : rv = append(rv, info)
2696 : }
2697 : }
2698 1 : return
2699 : }
2700 :
2701 1 : func inProgressL0Compactions(inProgress []compactionInfo) []manifest.L0Compaction {
2702 1 : var compactions []manifest.L0Compaction
2703 1 : for _, info := range inProgress {
2704 1 : // Skip in-progress compactions that have already committed; the L0
2705 1 : // sublevels initialization code requires the set of in-progress
2706 1 : // compactions to be consistent with the current version. Compactions
2707 1 : // with versionEditApplied=true are already applied to the current
2708 1 : // version but are performing cleanup without the database mutex.
2709 1 : if info.versionEditApplied {
2710 1 : continue
2711 : }
2712 1 : l0 := false
2713 1 : for _, cl := range info.inputs {
2714 1 : l0 = l0 || cl.level == 0
2715 1 : }
2716 1 : if !l0 {
2717 1 : continue
2718 : }
2719 1 : compactions = append(compactions, manifest.L0Compaction{
2720 1 : Smallest: info.smallest,
2721 1 : Largest: info.largest,
2722 1 : IsIntraL0: info.outputLevel == 0,
2723 1 : })
2724 : }
2725 1 : return compactions
2726 : }
2727 :
2728 : // firstError returns the first non-nil error of err0 and err1, or nil if both
2729 : // are nil.
2730 1 : func firstError(err0, err1 error) error {
2731 1 : if err0 != nil {
2732 1 : return err0
2733 1 : }
2734 1 : return err1
2735 : }
2736 :
2737 : // SetCreatorID sets the CreatorID which is needed in order to use shared objects.
2738 : // Remote object usage is disabled until this method is called the first time.
2739 : // Once set, the Creator ID is persisted and cannot change.
2740 : //
2741 : // Does nothing if remote storage (Experimental.RemoteStorage) was not set in
2742 : // the options when the DB was opened, or if the DB is in read-only mode.
2743 1 : func (d *DB) SetCreatorID(creatorID uint64) error {
2744 1 : if d.opts.Experimental.RemoteStorage == nil || d.opts.ReadOnly {
2745 0 : return nil
2746 0 : }
2747 1 : return d.objProvider.SetCreatorID(objstorage.CreatorID(creatorID))
2748 : }
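// A minimal usage sketch for SetCreatorID, assuming opts was built with
// Experimental.RemoteStorage configured; the ID value 1 is an arbitrary
// example chosen by the caller, not a Pebble requirement:
//
//	db, err := pebble.Open("demo-db", opts)
//	if err != nil {
//		return err
//	}
//	if err := db.SetCreatorID(1); err != nil {
//		return err
//	}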
2749 :
2750 : // KeyStatistics keeps track of the number of keys that have been pinned by a
2751 : // snapshot, as well as counts of the different key kinds in the LSM.
2752 : type KeyStatistics struct {
2753 : // SnapshotPinnedKeys is incremented when a compaction determines a key is
2754 : // obsolete, but cannot elide the key because it's required by an open snapshot.
2755 : SnapshotPinnedKeys int
2756 : // SnapshotPinnedKeysBytes is the total number of bytes of all snapshot pinned keys.
2757 : SnapshotPinnedKeysBytes uint64
2758 : // KindsCount is the number of keys of each kind; the per-level counts are currently only populated for point keys (including range deletes).
2759 : KindsCount [InternalKeyKindMax + 1]int
2760 : }
2761 :
2762 : // LSMKeyStatistics is used by DB.ScanStatistics.
2763 : type LSMKeyStatistics struct {
2764 : Accumulated KeyStatistics
2765 : // Levels contains statistics only for point keys. Range deletions and range keys will
2766 : // appear in Accumulated but not Levels.
2767 : Levels [numLevels]KeyStatistics
2768 : // BytesRead represents the logical, pre-compression size of keys and values read
2769 : BytesRead uint64
2770 : }
2771 :
2772 : // ScanStatisticsOptions is used by DB.ScanStatistics.
2773 : type ScanStatisticsOptions struct {
2774 : // LimitBytesPerSecond caps the number of bytes that can be read per second
2775 : // using ScanInternal.
2776 : // A value of 0 indicates that there is no limit.
2777 : LimitBytesPerSecond int64
2778 : }
2779 :
2780 : // ScanStatistics returns the count of different key kinds within the LSM for a
2781 : // key span [lower, upper), as well as the number of snapshot-pinned keys.
2782 : func (d *DB) ScanStatistics(
2783 : ctx context.Context, lower, upper []byte, opts ScanStatisticsOptions,
2784 0 : ) (LSMKeyStatistics, error) {
2785 0 : stats := LSMKeyStatistics{}
2786 0 : var prevKey InternalKey
2787 0 : var rateLimitFunc func(key *InternalKey, val LazyValue) error
2788 0 : tb := tokenbucket.TokenBucket{}
2789 0 :
2790 0 : if opts.LimitBytesPerSecond != 0 {
2791 0 : // Each "token" roughly corresponds to a byte that was read.
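// Init's second argument is the burst budget: up to 1024 bytes' worth of
// reads can proceed immediately before the limiter begins to throttle.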
2792 0 : tb.Init(tokenbucket.TokensPerSecond(opts.LimitBytesPerSecond), tokenbucket.Tokens(1024))
2793 0 : rateLimitFunc = func(key *InternalKey, val LazyValue) error {
2794 0 : return tb.WaitCtx(ctx, tokenbucket.Tokens(key.Size()+val.Len()))
2795 0 : }
2796 : }
2797 :
2798 0 : scanInternalOpts := &scanInternalOptions{
2799 0 : visitPointKey: func(key *InternalKey, value LazyValue, iterInfo IteratorLevel) error {
2800 0 : // If the previous user key equals the current point key's user key, the
2801 0 : // current key is an older version that was pinned by an open snapshot.
2802 0 : size := uint64(key.Size())
2803 0 : kind := key.Kind()
2804 0 : if iterInfo.Kind == IteratorLevelLSM && d.equal(prevKey.UserKey, key.UserKey) {
2805 0 : stats.Levels[iterInfo.Level].SnapshotPinnedKeys++
2806 0 : stats.Levels[iterInfo.Level].SnapshotPinnedKeysBytes += size
2807 0 : stats.Accumulated.SnapshotPinnedKeys++
2808 0 : stats.Accumulated.SnapshotPinnedKeysBytes += size
2809 0 : }
2810 0 : if iterInfo.Kind == IteratorLevelLSM {
2811 0 : stats.Levels[iterInfo.Level].KindsCount[kind]++
2812 0 : }
2813 :
2814 0 : stats.Accumulated.KindsCount[kind]++
2815 0 : prevKey.CopyFrom(*key)
2816 0 : stats.BytesRead += uint64(key.Size() + value.Len())
2817 0 : return nil
2818 : },
2819 0 : visitRangeDel: func(start, end []byte, seqNum uint64) error {
2820 0 : stats.Accumulated.KindsCount[InternalKeyKindRangeDelete]++
2821 0 : stats.BytesRead += uint64(len(start) + len(end))
2822 0 : return nil
2823 0 : },
2824 0 : visitRangeKey: func(start, end []byte, keys []rangekey.Key) error {
2825 0 : stats.BytesRead += uint64(len(start) + len(end))
2826 0 : for _, key := range keys {
2827 0 : stats.Accumulated.KindsCount[key.Kind()]++
2828 0 : stats.BytesRead += uint64(len(key.Value) + len(key.Suffix))
2829 0 : }
2830 0 : return nil
2831 : },
2832 : includeObsoleteKeys: true,
2833 : IterOptions: IterOptions{
2834 : KeyTypes: IterKeyTypePointsAndRanges,
2835 : LowerBound: lower,
2836 : UpperBound: upper,
2837 : },
2838 : rateLimitFunc: rateLimitFunc,
2839 : }
2840 0 : iter := d.newInternalIter(snapshotIterOpts{}, scanInternalOpts)
2841 0 : defer iter.close()
2842 0 :
2843 0 : err := scanInternalImpl(ctx, lower, upper, iter, scanInternalOpts)
2844 0 :
2845 0 : if err != nil {
2846 0 : return LSMKeyStatistics{}, err
2847 0 : }
2848 :
2849 0 : return stats, nil
2850 : }
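// A minimal usage sketch for ScanStatistics, assuming an open *DB named db, a
// context ctx, and byte-slice bounds lower and upper; the 1 MiB/s limit is an
// arbitrary example value:
//
//	stats, err := db.ScanStatistics(ctx, lower, upper,
//		pebble.ScanStatisticsOptions{LimitBytesPerSecond: 1 << 20})
//	if err != nil {
//		return err
//	}
//	fmt.Println("SET keys seen:", stats.Accumulated.KindsCount[pebble.InternalKeyKindSet])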
2851 :
2852 : // ObjProvider returns the objstorage.Provider for this database. Meant to be
2853 : // used for internal purposes only.
2854 0 : func (d *DB) ObjProvider() objstorage.Provider {
2855 0 : return d.objProvider
2856 0 : }
2857 :
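// checkVirtualBounds is an invariants-build-only sanity check: it verifies
// that the point key, range deletion, and range key bounds recorded in a
// virtual sstable's metadata are tight, and that every key in the table falls
// within them.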
2858 0 : func (d *DB) checkVirtualBounds(m *fileMetadata) {
2859 0 : if !invariants.Enabled {
2860 0 : return
2861 0 : }
2862 :
2863 0 : if m.HasPointKeys {
2864 0 : pointIter, rangeDelIter, err := d.newIters(context.TODO(), m, nil, internalIterOpts{})
2865 0 : if err != nil {
2866 0 : panic(errors.Wrap(err, "pebble: error creating point iterator"))
2867 : }
2868 :
2869 0 : defer pointIter.Close()
2870 0 : if rangeDelIter != nil {
2871 0 : defer rangeDelIter.Close()
2872 0 : }
2873 :
2874 0 : pointKey, _ := pointIter.First()
2875 0 : var rangeDel *keyspan.Span
2876 0 : if rangeDelIter != nil {
2877 0 : rangeDel = rangeDelIter.First()
2878 0 : }
2879 :
2880 : // Check that the lower bound is tight.
2881 0 : if (rangeDel == nil || d.cmp(rangeDel.SmallestKey().UserKey, m.SmallestPointKey.UserKey) != 0) &&
2882 0 : (pointKey == nil || d.cmp(pointKey.UserKey, m.SmallestPointKey.UserKey) != 0) {
2883 0 : panic(errors.Newf("pebble: virtual sstable %s lower point key bound is not tight", m.FileNum))
2884 : }
2885 :
2886 0 : pointKey, _ = pointIter.Last()
2887 0 : rangeDel = nil
2888 0 : if rangeDelIter != nil {
2889 0 : rangeDel = rangeDelIter.Last()
2890 0 : }
2891 :
2892 : // Check that the upper bound is tight.
2893 0 : if (rangeDel == nil || d.cmp(rangeDel.LargestKey().UserKey, m.LargestPointKey.UserKey) != 0) &&
2894 0 : (pointKey == nil || d.cmp(pointKey.UserKey, m.LargestPointKey.UserKey) != 0) {
2895 0 : panic(errors.Newf("pebble: virtual sstable %s upper point key bound is not tight", m.FileNum))
2896 : }
2897 :
2898 : // Check that iterator keys are within bounds.
2899 0 : for key, _ := pointIter.First(); key != nil; key, _ = pointIter.Next() {
2900 0 : if d.cmp(key.UserKey, m.SmallestPointKey.UserKey) < 0 || d.cmp(key.UserKey, m.LargestPointKey.UserKey) > 0 {
2901 0 : panic(errors.Newf("pebble: virtual sstable %s point key %s is not within bounds", m.FileNum, key.UserKey))
2902 : }
2903 : }
2904 :
2905 0 : if rangeDelIter != nil {
2906 0 : for key := rangeDelIter.First(); key != nil; key = rangeDelIter.Next() {
2907 0 : if d.cmp(key.SmallestKey().UserKey, m.SmallestPointKey.UserKey) < 0 {
2908 0 : panic(errors.Newf("pebble: virtual sstable %s point key %s is not within bounds", m.FileNum, key.SmallestKey().UserKey))
2909 : }
2910 :
2911 0 : if d.cmp(key.LargestKey().UserKey, m.LargestPointKey.UserKey) > 0 {
2912 0 : panic(errors.Newf("pebble: virtual sstable %s point key %s is not within bounds", m.FileNum, key.LargestKey().UserKey))
2913 : }
2914 : }
2915 : }
2916 : }
2917 :
2918 0 : if !m.HasRangeKeys {
2919 0 : return
2920 0 : }
2921 :
2922 0 : rangeKeyIter, err := d.tableNewRangeKeyIter(m, keyspan.SpanIterOptions{})
2923 0 : if err != nil {
2924 0 : panic(errors.Wrap(err, "pebble: error creating range key iterator"))
2925 : }
2926 0 : // Defer Close only after the error check; on error the iterator is nil.
2927 0 : defer rangeKeyIter.Close()
2928 :
2929 : // Check that the lower bound is tight.
2930 0 : if d.cmp(rangeKeyIter.First().SmallestKey().UserKey, m.SmallestRangeKey.UserKey) != 0 {
2931 0 : panic(errors.Newf("pebble: virtual sstable %s lower range key bound is not tight", m.FileNum))
2932 : }
2933 :
2934 : // Check that the upper bound is tight.
2935 0 : if d.cmp(rangeKeyIter.Last().LargestKey().UserKey, m.LargestRangeKey.UserKey) != 0 {
2936 0 : panic(errors.Newf("pebble: virtual sstable %s upper range key bound is not tight", m.FileNum))
2937 : }
2938 :
2939 0 : for key := rangeKeyIter.First(); key != nil; key = rangeKeyIter.Next() {
2940 0 : if d.cmp(key.SmallestKey().UserKey, m.SmallestRangeKey.UserKey) < 0 {
2941 0 : panic(errors.Newf("pebble: virtual sstable %s point key %s is not within bounds", m.FileNum, key.SmallestKey().UserKey))
2942 : }
2943 0 : if d.cmp(key.LargestKey().UserKey, m.LargestRangeKey.UserKey) > 0 {
2944 0 : panic(errors.Newf("pebble: virtual sstable %s point key %s is not within bounds", m.FileNum, key.LargestKey().UserKey))
2945 : }
2946 : }
2947 : }