LCOV - code coverage report
Current view: top level - pebble - db.go (source / functions)
Test: 2024-05-23 08:15Z cfcd8254 - tests + meta.lcov
Date: 2024-05-23 08:16:44
                              Hit     Total   Coverage
              Lines:         1469      1661     88.4 %
              Functions:        0         0        -

          Line data    Source code
       1             : // Copyright 2012 The LevelDB-Go and Pebble Authors. All rights reserved. Use
       2             : // of this source code is governed by a BSD-style license that can be found in
       3             : // the LICENSE file.
       4             : 
       5             : // Package pebble provides an ordered key/value store.
       6             : package pebble // import "github.com/cockroachdb/pebble"
       7             : 
       8             : import (
       9             :         "context"
      10             :         "fmt"
      11             :         "io"
      12             :         "strconv"
      13             :         "sync"
      14             :         "sync/atomic"
      15             :         "time"
      16             : 
      17             :         "github.com/cockroachdb/errors"
      18             :         "github.com/cockroachdb/pebble/internal/arenaskl"
      19             :         "github.com/cockroachdb/pebble/internal/base"
      20             :         "github.com/cockroachdb/pebble/internal/invalidating"
      21             :         "github.com/cockroachdb/pebble/internal/invariants"
      22             :         "github.com/cockroachdb/pebble/internal/keyspan"
      23             :         "github.com/cockroachdb/pebble/internal/keyspan/keyspanimpl"
      24             :         "github.com/cockroachdb/pebble/internal/manifest"
      25             :         "github.com/cockroachdb/pebble/internal/manual"
      26             :         "github.com/cockroachdb/pebble/objstorage"
      27             :         "github.com/cockroachdb/pebble/objstorage/remote"
      28             :         "github.com/cockroachdb/pebble/rangekey"
      29             :         "github.com/cockroachdb/pebble/record"
      30             :         "github.com/cockroachdb/pebble/sstable"
      31             :         "github.com/cockroachdb/pebble/vfs"
      32             :         "github.com/cockroachdb/pebble/vfs/atomicfs"
      33             :         "github.com/cockroachdb/pebble/wal"
      34             :         "github.com/cockroachdb/tokenbucket"
      35             :         "github.com/prometheus/client_golang/prometheus"
      36             : )
      37             : 
      38             : const (
      39             :         // minTableCacheSize is the minimum size of the table cache, for a single db.
      40             :         minTableCacheSize = 64
      41             : 
      42             :         // numNonTableCacheFiles is an approximation for the number of files
      43             :         // that we don't use for table caches, for a given db.
      44             :         numNonTableCacheFiles = 10
      45             : )
      46             : 
      47             : var (
      48             :         // ErrNotFound is returned when a get operation does not find the requested
      49             :         // key.
      50             :         ErrNotFound = base.ErrNotFound
      51             :         // ErrClosed is the panic value used when an operation is performed on a
      52             :         // closed snapshot or DB. Use errors.Is(err, ErrClosed) to check for this error.
      53             :         ErrClosed = errors.New("pebble: closed")
      54             :         // ErrReadOnly is returned when a write operation is performed on a read-only
      55             :         // database.
      56             :         ErrReadOnly = errors.New("pebble: read-only")
      57             :         // errNoSplit indicates that the user is trying to perform a range key
      58             :         // operation but the configured Comparer does not provide a Split
      59             :         // implementation.
      60             :         errNoSplit = errors.New("pebble: Comparer.Split required for range key operations")
      61             : )
      62             : 
      63             : // Reader is a readable key/value store.
      64             : //
      65             : // It is safe to call Get and NewIter from concurrent goroutines.
      66             : type Reader interface {
      67             :         // Get gets the value for the given key. It returns ErrNotFound if the DB
      68             :         // does not contain the key.
      69             :         //
      70             :         // The caller should not modify the contents of the returned slice, but it is
      71             :         // safe to modify the contents of the argument after Get returns. The
      72             :         // returned slice will remain valid until the returned Closer is closed. On
      73             :         // success, the caller MUST call closer.Close() or a memory leak will occur.
      74             :         Get(key []byte) (value []byte, closer io.Closer, err error)
      75             : 
      76             :         // NewIter returns an iterator that is unpositioned (Iterator.Valid() will
      77             :         // return false). The iterator can be positioned via a call to SeekGE,
      78             :         // SeekLT, First or Last.
      79             :         NewIter(o *IterOptions) (*Iterator, error)
      80             : 
      81             :         // NewIterWithContext is like NewIter, and additionally accepts a context
      82             :         // for tracing.
      83             :         NewIterWithContext(ctx context.Context, o *IterOptions) (*Iterator, error)
      84             : 
      85             :         // Close closes the Reader. It may or may not close any underlying io.Reader
      86             :         // or io.Writer, depending on how the DB was created.
      87             :         //
      88             :         // It is not safe to close a DB until all outstanding iterators are closed.
      89             :         // It is valid to call Close multiple times. Other methods should not be
      90             :         // called after the DB has been closed.
      91             :         Close() error
      92             : }
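
A minimal, self-contained sketch of the Get contract documented above: the
returned slice is valid only until the closer is closed, and the closer must be
closed on success. The directory name and keys are placeholders chosen for
illustration, and the store is opened on an in-memory filesystem so the snippet
stands alone.

        package main

        import (
                "errors"
                "fmt"
                "log"

                "github.com/cockroachdb/pebble"
                "github.com/cockroachdb/pebble/vfs"
        )

        func main() {
                // Open a throwaway in-memory store so the sketch is self-contained.
                db, err := pebble.Open("demo", &pebble.Options{FS: vfs.NewMem()})
                if err != nil {
                        log.Fatal(err)
                }
                defer db.Close()

                if err := db.Set([]byte("k"), []byte("v"), pebble.Sync); err != nil {
                        log.Fatal(err)
                }

                value, closer, err := db.Get([]byte("k"))
                switch {
                case errors.Is(err, pebble.ErrNotFound):
                        fmt.Println("k not found")
                case err != nil:
                        log.Fatal(err)
                default:
                        // The returned slice is only valid until closer.Close(), so
                        // copy it if it must outlive the closer.
                        fmt.Printf("k=%s\n", append([]byte(nil), value...))
                        if err := closer.Close(); err != nil {
                                log.Fatal(err)
                        }
                }
        }
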
      93             : 
      94             : // Writer is a writable key/value store.
      95             : //
      96             : // Goroutine safety is dependent on the specific implementation.
      97             : type Writer interface {
      98             :         // Apply the operations contained in the batch to the DB.
      99             :         //
     100             :         // It is safe to modify the contents of the arguments after Apply returns.
     101             :         Apply(batch *Batch, o *WriteOptions) error
     102             : 
      103             :         // Delete deletes the value for the given key. Deletes are blind and will
      104             :         // succeed even if the given key does not exist.
     105             :         //
     106             :         // It is safe to modify the contents of the arguments after Delete returns.
     107             :         Delete(key []byte, o *WriteOptions) error
     108             : 
     109             :         // DeleteSized behaves identically to Delete, but takes an additional
     110             :         // argument indicating the size of the value being deleted. DeleteSized
     111             :         // should be preferred when the caller has the expectation that there exists
     112             :         // a single internal KV pair for the key (eg, the key has not been
     113             :         // overwritten recently), and the caller knows the size of its value.
     114             :         //
     115             :         // DeleteSized will record the value size within the tombstone and use it to
     116             :         // inform compaction-picking heuristics which strive to reduce space
     117             :         // amplification in the LSM. This "calling your shot" mechanic allows the
     118             :         // storage engine to more accurately estimate and reduce space
     119             :         // amplification.
     120             :         //
     121             :         // It is safe to modify the contents of the arguments after DeleteSized
     122             :         // returns.
     123             :         DeleteSized(key []byte, valueSize uint32, _ *WriteOptions) error
     124             : 
     125             :         // SingleDelete is similar to Delete in that it deletes the value for the given key. Like Delete,
     126             :         // it is a blind operation that will succeed even if the given key does not exist.
     127             :         //
     128             :         // WARNING: Undefined (non-deterministic) behavior will result if a key is overwritten and
     129             :         // then deleted using SingleDelete. The record may appear deleted immediately, but be
     130             :         // resurrected at a later time after compactions have been performed. Or the record may
     131             :         // be deleted permanently. A Delete operation lays down a "tombstone" which shadows all
     132             :         // previous versions of a key. The SingleDelete operation is akin to "anti-matter" and will
     133             :         // only delete the most recently written version for a key. These different semantics allow
     134             :         // the DB to avoid propagating a SingleDelete operation during a compaction as soon as the
     135             :         // corresponding Set operation is encountered. These semantics require extreme care to handle
     136             :         // properly. Only use if you have a workload where the performance gain is critical and you
     137             :         // can guarantee that a record is written once and then deleted once.
     138             :         //
     139             :         // SingleDelete is internally transformed into a Delete if the most recent record for a key is either
     140             :         // a Merge or Delete record.
     141             :         //
     142             :         // It is safe to modify the contents of the arguments after SingleDelete returns.
     143             :         SingleDelete(key []byte, o *WriteOptions) error
     144             : 
     145             :         // DeleteRange deletes all of the point keys (and values) in the range
     146             :         // [start,end) (inclusive on start, exclusive on end). DeleteRange does NOT
     147             :         // delete overlapping range keys (eg, keys set via RangeKeySet).
     148             :         //
     149             :         // It is safe to modify the contents of the arguments after DeleteRange
     150             :         // returns.
     151             :         DeleteRange(start, end []byte, o *WriteOptions) error
     152             : 
      153             :         // LogData adds the specified data to the batch. The data will be written to the
     154             :         // WAL, but not added to memtables or sstables. Log data is never indexed,
     155             :         // which makes it useful for testing WAL performance.
     156             :         //
     157             :         // It is safe to modify the contents of the argument after LogData returns.
     158             :         LogData(data []byte, opts *WriteOptions) error
     159             : 
     160             :         // Merge merges the value for the given key. The details of the merge are
     161             :         // dependent upon the configured merge operation.
     162             :         //
     163             :         // It is safe to modify the contents of the arguments after Merge returns.
     164             :         Merge(key, value []byte, o *WriteOptions) error
     165             : 
     166             :         // Set sets the value for the given key. It overwrites any previous value
     167             :         // for that key; a DB is not a multi-map.
     168             :         //
     169             :         // It is safe to modify the contents of the arguments after Set returns.
     170             :         Set(key, value []byte, o *WriteOptions) error
     171             : 
     172             :         // RangeKeySet sets a range key mapping the key range [start, end) at the MVCC
     173             :         // timestamp suffix to value. The suffix is optional. If any portion of the key
     174             :         // range [start, end) is already set by a range key with the same suffix value,
     175             :         // RangeKeySet overrides it.
     176             :         //
     177             :         // It is safe to modify the contents of the arguments after RangeKeySet returns.
     178             :         RangeKeySet(start, end, suffix, value []byte, opts *WriteOptions) error
     179             : 
     180             :         // RangeKeyUnset removes a range key mapping the key range [start, end) at the
     181             :         // MVCC timestamp suffix. The suffix may be omitted to remove an unsuffixed
     182             :         // range key. RangeKeyUnset only removes portions of range keys that fall within
     183             :         // the [start, end) key span, and only range keys with suffixes that exactly
     184             :         // match the unset suffix.
     185             :         //
     186             :         // It is safe to modify the contents of the arguments after RangeKeyUnset
     187             :         // returns.
     188             :         RangeKeyUnset(start, end, suffix []byte, opts *WriteOptions) error
     189             : 
     190             :         // RangeKeyDelete deletes all of the range keys in the range [start,end)
     191             :         // (inclusive on start, exclusive on end). It does not delete point keys (for
     192             :         // that use DeleteRange). RangeKeyDelete removes all range keys within the
     193             :         // bounds, including those with or without suffixes.
     194             :         //
     195             :         // It is safe to modify the contents of the arguments after RangeKeyDelete
     196             :         // returns.
     197             :         RangeKeyDelete(start, end []byte, opts *WriteOptions) error
     198             : }
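
A sketch of the SingleDelete constraint described in the Writer interface
above: SingleDelete is only safe when a key has been written exactly once since
the last tombstone; otherwise Delete must be used. Assumes db is an open
*pebble.DB; the keys and helper name are illustrative.

        // singleDeleteSketch shows when SingleDelete is, and is not, appropriate.
        func singleDeleteSketch(db *pebble.DB) error {
                // Written once, then removed once: SingleDelete is safe, and its
                // "anti-matter" record can be dropped early during compaction.
                if err := db.Set([]byte("once"), []byte("v1"), pebble.NoSync); err != nil {
                        return err
                }
                if err := db.SingleDelete([]byte("once"), pebble.NoSync); err != nil {
                        return err
                }

                // An overwritten key must use Delete; SingleDelete here could let
                // the older value resurface after compactions.
                if err := db.Set([]byte("twice"), []byte("v1"), pebble.NoSync); err != nil {
                        return err
                }
                if err := db.Set([]byte("twice"), []byte("v2"), pebble.NoSync); err != nil {
                        return err
                }
                return db.Delete([]byte("twice"), pebble.NoSync)
        }
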
     199             : 
     200             : // CPUWorkHandle represents a handle used by the CPUWorkPermissionGranter API.
     201             : type CPUWorkHandle interface {
     202             :         // Permitted indicates whether Pebble can use additional CPU resources.
     203             :         Permitted() bool
     204             : }
     205             : 
     206             : // CPUWorkPermissionGranter is used to request permission to opportunistically
     207             : // use additional CPUs to speed up internal background work.
     208             : type CPUWorkPermissionGranter interface {
     209             :         // GetPermission returns a handle regardless of whether permission is granted
     210             :         // or not. In the latter case, the handle is only useful for recording
     211             :         // the CPU time actually spent on this calling goroutine.
     212             :         GetPermission(time.Duration) CPUWorkHandle
     213             :         // CPUWorkDone must be called regardless of whether CPUWorkHandle.Permitted
     214             :         // returns true or false.
     215             :         CPUWorkDone(CPUWorkHandle)
     216             : }
     217             : 
     218             : // Use a default implementation for the CPU work granter to avoid excessive nil
     219             : // checks in the code.
     220             : type defaultCPUWorkHandle struct{}
     221             : 
     222           2 : func (d defaultCPUWorkHandle) Permitted() bool {
     223           2 :         return false
     224           2 : }
     225             : 
     226             : type defaultCPUWorkGranter struct{}
     227             : 
     228           2 : func (d defaultCPUWorkGranter) GetPermission(_ time.Duration) CPUWorkHandle {
     229           2 :         return defaultCPUWorkHandle{}
     230           2 : }
     231             : 
     232           2 : func (d defaultCPUWorkGranter) CPUWorkDone(_ CPUWorkHandle) {}
     233             : 
     234             : // DB provides a concurrent, persistent ordered key/value store.
     235             : //
      236             : // A DB's basic operations (Get, Set, Delete) should be self-explanatory. Get
      237             : // returns ErrNotFound if the requested key is not in the store, while Delete is
      238             : // blind and succeeds regardless. Callers are free to ignore ErrNotFound.
     239             : //
     240             : // A DB also allows for iterating over the key/value pairs in key order. If d
     241             : // is a DB, the code below prints all key/value pairs whose keys are 'greater
     242             : // than or equal to' k:
     243             : //
     244             : //      iter := d.NewIter(readOptions)
     245             : //      for iter.SeekGE(k); iter.Valid(); iter.Next() {
     246             : //              fmt.Printf("key=%q value=%q\n", iter.Key(), iter.Value())
     247             : //      }
     248             : //      return iter.Close()
     249             : //
     250             : // The Options struct holds the optional parameters for the DB, including a
     251             : // Comparer to define a 'less than' relationship over keys. It is always valid
     252             : // to pass a nil *Options, which means to use the default parameter values. Any
     253             : // zero field of a non-nil *Options also means to use the default value for
     254             : // that parameter. Thus, the code below uses a custom Comparer, but the default
     255             : // values for every other parameter:
     256             : //
     257             : //      db := pebble.Open(&Options{
     258             : //              Comparer: myComparer,
     259             : //      })
     260             : type DB struct {
     261             :         // The count and size of referenced memtables. This includes memtables
     262             :         // present in DB.mu.mem.queue, as well as memtables that have been flushed
      263             :         // but are still referenced by an in-use readState, as well as up to one
     264             :         // memTable waiting to be reused and stored in d.memTableRecycle.
     265             :         memTableCount    atomic.Int64
     266             :         memTableReserved atomic.Int64 // number of bytes reserved in the cache for memtables
     267             :         // memTableRecycle holds a pointer to an obsolete memtable. The next
     268             :         // memtable allocation will reuse this memtable if it has not already been
     269             :         // recycled.
     270             :         memTableRecycle atomic.Pointer[memTable]
     271             : 
     272             :         // The logical size of the current WAL.
     273             :         logSize atomic.Uint64
     274             :         // The number of input bytes to the log. This is the raw size of the
     275             :         // batches written to the WAL, without the overhead of the record
     276             :         // envelopes.
     277             :         logBytesIn atomic.Uint64
     278             : 
     279             :         // The number of bytes available on disk.
     280             :         diskAvailBytes atomic.Uint64
     281             : 
     282             :         cacheID        uint64
     283             :         dirname        string
     284             :         opts           *Options
     285             :         cmp            Compare
     286             :         equal          Equal
     287             :         merge          Merge
     288             :         split          Split
     289             :         abbreviatedKey AbbreviatedKey
     290             :         // The threshold for determining when a batch is "large" and will skip being
     291             :         // inserted into a memtable.
     292             :         largeBatchThreshold uint64
     293             :         // The current OPTIONS file number.
     294             :         optionsFileNum base.DiskFileNum
     295             :         // The on-disk size of the current OPTIONS file.
     296             :         optionsFileSize uint64
     297             : 
     298             :         // objProvider is used to access and manage SSTs.
     299             :         objProvider objstorage.Provider
     300             : 
     301             :         fileLock *Lock
     302             :         dataDir  vfs.File
     303             : 
     304             :         tableCache           *tableCacheContainer
     305             :         newIters             tableNewIters
     306             :         tableNewRangeKeyIter keyspanimpl.TableNewSpanIter
     307             : 
     308             :         commit *commitPipeline
     309             : 
     310             :         // readState provides access to the state needed for reading without needing
     311             :         // to acquire DB.mu.
     312             :         readState struct {
     313             :                 sync.RWMutex
     314             :                 val *readState
     315             :         }
     316             : 
     317             :         closed   *atomic.Value
     318             :         closedCh chan struct{}
     319             : 
     320             :         cleanupManager *cleanupManager
     321             : 
     322             :         // During an iterator close, we may asynchronously schedule read compactions.
     323             :         // We want to wait for those goroutines to finish, before closing the DB.
      324             :         // compactionSchedulers.Wait() should not be called while the DB.mu is held.
     325             :         compactionSchedulers sync.WaitGroup
     326             : 
     327             :         // The main mutex protecting internal DB state. This mutex encompasses many
     328             :         // fields because those fields need to be accessed and updated atomically. In
     329             :         // particular, the current version, log.*, mem.*, and snapshot list need to
     330             :         // be accessed and updated atomically during compaction.
     331             :         //
     332             :         // Care is taken to avoid holding DB.mu during IO operations. Accomplishing
     333             :         // this sometimes requires releasing DB.mu in a method that was called with
     334             :         // it held. See versionSet.logAndApply() and DB.makeRoomForWrite() for
     335             :         // examples. This is a common pattern, so be careful about expectations that
     336             :         // DB.mu will be held continuously across a set of calls.
     337             :         mu struct {
     338             :                 sync.Mutex
     339             : 
     340             :                 formatVers struct {
     341             :                         // vers is the database's current format major version.
     342             :                         // Backwards-incompatible features are gated behind new
     343             :                         // format major versions and not enabled until a database's
     344             :                         // version is ratcheted upwards.
     345             :                         //
     346             :                         // Although this is under the `mu` prefix, readers may read vers
     347             :                         // atomically without holding d.mu. Writers must only write to this
     348             :                         // value through finalizeFormatVersUpgrade which requires d.mu is
     349             :                         // held.
     350             :                         vers atomic.Uint64
     351             :                         // marker is the atomic marker for the format major version.
     352             :                         // When a database's version is ratcheted upwards, the
     353             :                         // marker is moved in order to atomically record the new
     354             :                         // version.
     355             :                         marker *atomicfs.Marker
     356             :                         // ratcheting when set to true indicates that the database is
     357             :                         // currently in the process of ratcheting the format major version
     358             :                         // to vers + 1. As a part of ratcheting the format major version,
     359             :                         // migrations may drop and re-acquire the mutex.
     360             :                         ratcheting bool
     361             :                 }
     362             : 
     363             :                 // The ID of the next job. Job IDs are passed to event listener
     364             :                 // notifications and act as a mechanism for tying together the events and
     365             :                 // log messages for a single job such as a flush, compaction, or file
     366             :                 // ingestion. Job IDs are not serialized to disk or used for correctness.
     367             :                 nextJobID JobID
     368             : 
     369             :                 // The collection of immutable versions and state about the log and visible
     370             :                 // sequence numbers. Use the pointer here to ensure the atomic fields in
     371             :                 // version set are aligned properly.
     372             :                 versions *versionSet
     373             : 
     374             :                 log struct {
     375             :                         // manager is not protected by mu, but calls to Create must be
     376             :                         // serialized, and happen after the previous writer is closed.
     377             :                         manager wal.Manager
     378             :                         // The Writer is protected by commitPipeline.mu. This allows log writes
     379             :                         // to be performed without holding DB.mu, but requires both
     380             :                         // commitPipeline.mu and DB.mu to be held when rotating the WAL/memtable
     381             :                         // (i.e. makeRoomForWrite). Can be nil.
     382             :                         writer  wal.Writer
     383             :                         metrics struct {
     384             :                                 // fsyncLatency has its own internal synchronization, and is not
     385             :                                 // protected by mu.
     386             :                                 fsyncLatency prometheus.Histogram
     387             :                                 // Updated whenever a wal.Writer is closed.
     388             :                                 record.LogWriterMetrics
     389             :                         }
     390             :                 }
     391             : 
     392             :                 mem struct {
     393             :                         // The current mutable memTable. Readers of the pointer may hold
     394             :                         // either DB.mu or commitPipeline.mu.
     395             :                         //
     396             :                         // Its internal fields are protected by commitPipeline.mu. This
     397             :                         // allows batch commits to be performed without DB.mu as long as no
     398             :                         // memtable rotation is required.
     399             :                         //
     400             :                         // Both commitPipeline.mu and DB.mu must be held when rotating the
     401             :                         // memtable.
     402             :                         mutable *memTable
     403             :                         // Queue of flushables (the mutable memtable is at end). Elements are
     404             :                         // added to the end of the slice and removed from the beginning. Once an
     405             :                         // index is set it is never modified making a fixed slice immutable and
     406             :                         // safe for concurrent reads.
     407             :                         queue flushableList
     408             :                         // nextSize is the size of the next memtable. The memtable size starts at
     409             :                         // min(256KB,Options.MemTableSize) and doubles each time a new memtable
     410             :                         // is allocated up to Options.MemTableSize. This reduces the memory
     411             :                         // footprint of memtables when lots of DB instances are used concurrently
     412             :                         // in test environments.
     413             :                         nextSize uint64
     414             :                 }
     415             : 
     416             :                 compact struct {
     417             :                         // Condition variable used to signal when a flush or compaction has
     418             :                         // completed. Used by the write-stall mechanism to wait for the stall
     419             :                         // condition to clear. See DB.makeRoomForWrite().
     420             :                         cond sync.Cond
     421             :                         // True when a flush is in progress.
     422             :                         flushing bool
     423             :                         // The number of ongoing non-download compactions.
     424             :                         compactingCount int
     425             :                         // The number of download compactions.
     426             :                         downloadingCount int
     427             :                         // The list of deletion hints, suggesting ranges for delete-only
     428             :                         // compactions.
     429             :                         deletionHints []deleteCompactionHint
     430             :                         // The list of manual compactions. The next manual compaction to perform
     431             :                         // is at the start of the list. New entries are added to the end.
     432             :                         manual []*manualCompaction
     433             :                         // downloads is the list of pending download tasks. The next download to
     434             :                         // perform is at the start of the list. New entries are added to the end.
     435             :                         downloads []*downloadSpanTask
     436             :                         // inProgress is the set of in-progress flushes and compactions.
     437             :                         // It's used in the calculation of some metrics and to initialize L0
     438             :                         // sublevels' state. Some of the compactions contained within this
     439             :                         // map may have already committed an edit to the version but are
     440             :                         // lingering performing cleanup, like deleting obsolete files.
     441             :                         inProgress map[*compaction]struct{}
     442             : 
     443             :                         // rescheduleReadCompaction indicates to an iterator that a read compaction
     444             :                         // should be scheduled.
     445             :                         rescheduleReadCompaction bool
     446             : 
     447             :                         // readCompactions is a readCompactionQueue which keeps track of the
     448             :                         // compactions which we might have to perform.
     449             :                         readCompactions readCompactionQueue
     450             : 
     451             :                         // The cumulative duration of all completed compactions since Open.
     452             :                         // Does not include flushes.
     453             :                         duration time.Duration
     454             :                         // Flush throughput metric.
     455             :                         flushWriteThroughput ThroughputMetric
     456             :                         // The idle start time for the flush "loop", i.e., when the flushing
     457             :                         // bool above transitions to false.
     458             :                         noOngoingFlushStartTime time.Time
     459             :                 }
     460             : 
     461             :                 // Non-zero when file cleaning is disabled. The disabled count acts as a
     462             :                 // reference count to prohibit file cleaning. See
     463             :                 // DB.{disable,Enable}FileDeletions().
     464             :                 disableFileDeletions int
     465             : 
     466             :                 snapshots struct {
     467             :                         // The list of active snapshots.
     468             :                         snapshotList
     469             : 
     470             :                         // The cumulative count and size of snapshot-pinned keys written to
     471             :                         // sstables.
     472             :                         cumulativePinnedCount uint64
     473             :                         cumulativePinnedSize  uint64
     474             :                 }
     475             : 
     476             :                 tableStats struct {
     477             :                         // Condition variable used to signal the completion of a
     478             :                         // job to collect table stats.
     479             :                         cond sync.Cond
     480             :                         // True when a stat collection operation is in progress.
     481             :                         loading bool
     482             :                         // True if stat collection has loaded statistics for all tables
     483             :                         // other than those listed explicitly in pending. This flag starts
     484             :                         // as false when a database is opened and flips to true once stat
     485             :                         // collection has caught up.
     486             :                         loadedInitial bool
     487             :                         // A slice of files for which stats have not been computed.
     488             :                         // Compactions, ingests, flushes append files to be processed. An
     489             :                         // active stat collection goroutine clears the list and processes
     490             :                         // them.
     491             :                         pending []manifest.NewFileEntry
     492             :                 }
     493             : 
     494             :                 tableValidation struct {
     495             :                         // cond is a condition variable used to signal the completion of a
     496             :                         // job to validate one or more sstables.
     497             :                         cond sync.Cond
     498             :                         // pending is a slice of metadata for sstables waiting to be
     499             :                         // validated. Only physical sstables should be added to the pending
     500             :                         // queue.
     501             :                         pending []newFileEntry
     502             :                         // validating is set to true when validation is running.
     503             :                         validating bool
     504             :                 }
     505             :         }
     506             : 
     507             :         // Normally equal to time.Now() but may be overridden in tests.
     508             :         timeNow func() time.Time
     509             :         // the time at database Open; may be used to compute metrics like effective
     510             :         // compaction concurrency
     511             :         openedAt time.Time
     512             : }
     513             : 
     514             : var _ Reader = (*DB)(nil)
     515             : var _ Writer = (*DB)(nil)
     516             : 
     517             : // TestOnlyWaitForCleaning MUST only be used in tests.
     518           1 : func (d *DB) TestOnlyWaitForCleaning() {
     519           1 :         d.cleanupManager.Wait()
     520           1 : }
     521             : 
     522             : // Get gets the value for the given key. It returns ErrNotFound if the DB does
     523             : // not contain the key.
     524             : //
     525             : // The caller should not modify the contents of the returned slice, but it is
     526             : // safe to modify the contents of the argument after Get returns. The returned
     527             : // slice will remain valid until the returned Closer is closed. On success, the
     528             : // caller MUST call closer.Close() or a memory leak will occur.
     529           2 : func (d *DB) Get(key []byte) ([]byte, io.Closer, error) {
     530           2 :         return d.getInternal(key, nil /* batch */, nil /* snapshot */)
     531           2 : }
     532             : 
     533             : type getIterAlloc struct {
     534             :         dbi    Iterator
     535             :         keyBuf []byte
     536             :         get    getIter
     537             : }
     538             : 
     539             : var getIterAllocPool = sync.Pool{
     540           2 :         New: func() interface{} {
     541           2 :                 return &getIterAlloc{}
     542           2 :         },
     543             : }
     544             : 
     545           2 : func (d *DB) getInternal(key []byte, b *Batch, s *Snapshot) ([]byte, io.Closer, error) {
     546           2 :         if err := d.closed.Load(); err != nil {
     547           1 :                 panic(err)
     548             :         }
     549             : 
     550             :         // Grab and reference the current readState. This prevents the underlying
     551             :         // files in the associated version from being deleted if there is a current
     552             :         // compaction. The readState is unref'd by Iterator.Close().
     553           2 :         readState := d.loadReadState()
     554           2 : 
     555           2 :         // Determine the seqnum to read at after grabbing the read state (current and
     556           2 :         // memtables) above.
     557           2 :         var seqNum uint64
     558           2 :         if s != nil {
     559           2 :                 seqNum = s.seqNum
     560           2 :         } else {
     561           2 :                 seqNum = d.mu.versions.visibleSeqNum.Load()
     562           2 :         }
     563             : 
     564           2 :         buf := getIterAllocPool.Get().(*getIterAlloc)
     565           2 : 
     566           2 :         get := &buf.get
     567           2 :         *get = getIter{
     568           2 :                 comparer: d.opts.Comparer,
     569           2 :                 newIters: d.newIters,
     570           2 :                 snapshot: seqNum,
     571           2 :                 iterOpts: IterOptions{
     572           2 :                         // TODO(sumeer): replace with a parameter provided by the caller.
     573           2 :                         CategoryAndQoS: sstable.CategoryAndQoS{
     574           2 :                                 Category: "pebble-get",
     575           2 :                                 QoSLevel: sstable.LatencySensitiveQoSLevel,
     576           2 :                         },
     577           2 :                         logger:                        d.opts.Logger,
     578           2 :                         snapshotForHideObsoletePoints: seqNum,
     579           2 :                 },
     580           2 :                 key: key,
     581           2 :                 // Compute the key prefix for bloom filtering.
     582           2 :                 prefix:  key[:d.opts.Comparer.Split(key)],
     583           2 :                 batch:   b,
     584           2 :                 mem:     readState.memtables,
     585           2 :                 l0:      readState.current.L0SublevelFiles,
     586           2 :                 version: readState.current,
     587           2 :         }
     588           2 : 
     589           2 :         // Strip off memtables which cannot possibly contain the seqNum being read
     590           2 :         // at.
     591           2 :         for len(get.mem) > 0 {
     592           2 :                 n := len(get.mem)
     593           2 :                 if logSeqNum := get.mem[n-1].logSeqNum; logSeqNum < seqNum {
     594           2 :                         break
     595             :                 }
     596           2 :                 get.mem = get.mem[:n-1]
     597             :         }
     598             : 
     599           2 :         i := &buf.dbi
     600           2 :         pointIter := get
     601           2 :         *i = Iterator{
     602           2 :                 ctx:          context.Background(),
     603           2 :                 getIterAlloc: buf,
     604           2 :                 iter:         pointIter,
     605           2 :                 pointIter:    pointIter,
     606           2 :                 merge:        d.merge,
     607           2 :                 comparer:     *d.opts.Comparer,
     608           2 :                 readState:    readState,
     609           2 :                 keyBuf:       buf.keyBuf,
     610           2 :         }
     611           2 : 
     612           2 :         if !i.First() {
     613           2 :                 err := i.Close()
     614           2 :                 if err != nil {
     615           1 :                         return nil, nil, err
     616           1 :                 }
     617           2 :                 return nil, nil, ErrNotFound
     618             :         }
     619           2 :         return i.Value(), i, nil
     620             : }
     621             : 
     622             : // Set sets the value for the given key. It overwrites any previous value
     623             : // for that key; a DB is not a multi-map.
     624             : //
     625             : // It is safe to modify the contents of the arguments after Set returns.
     626           2 : func (d *DB) Set(key, value []byte, opts *WriteOptions) error {
     627           2 :         b := newBatch(d)
     628           2 :         _ = b.Set(key, value, opts)
     629           2 :         if err := d.Apply(b, opts); err != nil {
     630           1 :                 return err
     631           1 :         }
     632             :         // Only release the batch on success.
     633           2 :         return b.Close()
     634             : }
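
Set, like the other convenience write methods below, builds a single-operation
batch and commits it through Apply. When several writes need to commit
atomically (or simply to amortize commit overhead), an explicit batch is the
usual alternative; a minimal sketch, assuming db is an open *pebble.DB and
illustrative keys:

        // batchedWritesSketch commits three mutations as one atomic batch,
        // equivalent to, but cheaper than, three separate Set/Delete calls.
        func batchedWritesSketch(db *pebble.DB) error {
                b := db.NewBatch()
                if err := b.Set([]byte("a"), []byte("1"), nil); err != nil {
                        return err
                }
                if err := b.Set([]byte("b"), []byte("2"), nil); err != nil {
                        return err
                }
                if err := b.Delete([]byte("stale"), nil); err != nil {
                        return err
                }
                if err := db.Apply(b, pebble.Sync); err != nil {
                        return err
                }
                // Mirror the pattern used above: only release the batch on success.
                return b.Close()
        }
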
     635             : 
      636             : // Delete deletes the value for the given key. Deletes are blind and will
      637             : // succeed even if the given key does not exist.
     638             : //
     639             : // It is safe to modify the contents of the arguments after Delete returns.
     640           2 : func (d *DB) Delete(key []byte, opts *WriteOptions) error {
     641           2 :         b := newBatch(d)
     642           2 :         _ = b.Delete(key, opts)
     643           2 :         if err := d.Apply(b, opts); err != nil {
     644           1 :                 return err
     645           1 :         }
     646             :         // Only release the batch on success.
     647           2 :         return b.Close()
     648             : }
     649             : 
     650             : // DeleteSized behaves identically to Delete, but takes an additional
     651             : // argument indicating the size of the value being deleted. DeleteSized
     652             : // should be preferred when the caller has the expectation that there exists
     653             : // a single internal KV pair for the key (eg, the key has not been
     654             : // overwritten recently), and the caller knows the size of its value.
     655             : //
     656             : // DeleteSized will record the value size within the tombstone and use it to
     657             : // inform compaction-picking heuristics which strive to reduce space
     658             : // amplification in the LSM. This "calling your shot" mechanic allows the
     659             : // storage engine to more accurately estimate and reduce space amplification.
     660             : //
     661             : // It is safe to modify the contents of the arguments after DeleteSized
     662             : // returns.
     663           2 : func (d *DB) DeleteSized(key []byte, valueSize uint32, opts *WriteOptions) error {
     664           2 :         b := newBatch(d)
     665           2 :         _ = b.DeleteSized(key, valueSize, opts)
     666           2 :         if err := d.Apply(b, opts); err != nil {
     667           0 :                 return err
     668           0 :         }
     669             :         // Only release the batch on success.
     670           2 :         return b.Close()
     671             : }
     672             : 
     673             : // SingleDelete adds an action to the batch that single deletes the entry for key.
     674             : // See Writer.SingleDelete for more details on the semantics of SingleDelete.
     675             : //
     676             : // It is safe to modify the contents of the arguments after SingleDelete returns.
     677           2 : func (d *DB) SingleDelete(key []byte, opts *WriteOptions) error {
     678           2 :         b := newBatch(d)
     679           2 :         _ = b.SingleDelete(key, opts)
     680           2 :         if err := d.Apply(b, opts); err != nil {
     681           0 :                 return err
     682           0 :         }
     683             :         // Only release the batch on success.
     684           2 :         return b.Close()
     685             : }
     686             : 
      687             : // DeleteRange deletes all of the point keys (and values) in the range [start,end)
      688             : // (inclusive on start, exclusive on end). It does not delete range keys.
     689             : //
     690             : // It is safe to modify the contents of the arguments after DeleteRange
     691             : // returns.
     692           2 : func (d *DB) DeleteRange(start, end []byte, opts *WriteOptions) error {
     693           2 :         b := newBatch(d)
     694           2 :         _ = b.DeleteRange(start, end, opts)
     695           2 :         if err := d.Apply(b, opts); err != nil {
     696           1 :                 return err
     697           1 :         }
     698             :         // Only release the batch on success.
     699           2 :         return b.Close()
     700             : }
     701             : 
     702             : // Merge adds an action to the DB that merges the value at key with the new
     703             : // value. The details of the merge are dependent upon the configured merge
     704             : // operator.
     705             : //
     706             : // It is safe to modify the contents of the arguments after Merge returns.
     707           2 : func (d *DB) Merge(key, value []byte, opts *WriteOptions) error {
     708           2 :         b := newBatch(d)
     709           2 :         _ = b.Merge(key, value, opts)
     710           2 :         if err := d.Apply(b, opts); err != nil {
     711           1 :                 return err
     712           1 :         }
     713             :         // Only release the batch on success.
     714           2 :         return b.Close()
     715             : }
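
The result of Merge is determined by Options.Merger. A small sketch, assuming
the default merge operator (which concatenates operands) and illustrative keys;
a custom Merger would combine the operands differently:

        // mergeSketch writes two merge operands for one key and reads back the
        // combined value. With the default concatenating merger the result is "ab".
        func mergeSketch(db *pebble.DB) ([]byte, error) {
                if err := db.Merge([]byte("log"), []byte("a"), pebble.NoSync); err != nil {
                        return nil, err
                }
                if err := db.Merge([]byte("log"), []byte("b"), pebble.NoSync); err != nil {
                        return nil, err
                }
                v, closer, err := db.Get([]byte("log"))
                if err != nil {
                        return nil, err
                }
                // Copy before Close: the slice returned by Get is only valid until
                // the closer is closed.
                merged := append([]byte(nil), v...)
                return merged, closer.Close()
        }
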
     716             : 
      717             : // LogData adds the specified data to the batch. The data will be written to the
     718             : // WAL, but not added to memtables or sstables. Log data is never indexed,
     719             : // which makes it useful for testing WAL performance.
     720             : //
     721             : // It is safe to modify the contents of the argument after LogData returns.
     722           2 : func (d *DB) LogData(data []byte, opts *WriteOptions) error {
     723           2 :         b := newBatch(d)
     724           2 :         _ = b.LogData(data, opts)
     725           2 :         if err := d.Apply(b, opts); err != nil {
     726           1 :                 return err
     727           1 :         }
     728             :         // Only release the batch on success.
     729           2 :         return b.Close()
     730             : }
     731             : 
     732             : // RangeKeySet sets a range key mapping the key range [start, end) at the MVCC
     733             : // timestamp suffix to value. The suffix is optional. If any portion of the key
     734             : // range [start, end) is already set by a range key with the same suffix value,
     735             : // RangeKeySet overrides it.
     736             : //
     737             : // It is safe to modify the contents of the arguments after RangeKeySet returns.
     738           2 : func (d *DB) RangeKeySet(start, end, suffix, value []byte, opts *WriteOptions) error {
     739           2 :         b := newBatch(d)
     740           2 :         _ = b.RangeKeySet(start, end, suffix, value, opts)
     741           2 :         if err := d.Apply(b, opts); err != nil {
     742           0 :                 return err
     743           0 :         }
     744             :         // Only release the batch on success.
     745           2 :         return b.Close()
     746             : }
     747             : 
     748             : // RangeKeyUnset removes a range key mapping the key range [start, end) at the
     749             : // MVCC timestamp suffix. The suffix may be omitted to remove an unsuffixed
     750             : // range key. RangeKeyUnset only removes portions of range keys that fall within
     751             : // the [start, end) key span, and only range keys with suffixes that exactly
     752             : // match the unset suffix.
     753             : //
     754             : // It is safe to modify the contents of the arguments after RangeKeyUnset
     755             : // returns.
     756           2 : func (d *DB) RangeKeyUnset(start, end, suffix []byte, opts *WriteOptions) error {
     757           2 :         b := newBatch(d)
     758           2 :         _ = b.RangeKeyUnset(start, end, suffix, opts)
     759           2 :         if err := d.Apply(b, opts); err != nil {
     760           0 :                 return err
     761           0 :         }
     762             :         // Only release the batch on success.
     763           2 :         return b.Close()
     764             : }
     765             : 
     766             : // RangeKeyDelete deletes all of the range keys in the range [start,end)
     767             : // (inclusive on start, exclusive on end). It does not delete point keys (for
     768             : // that use DeleteRange). RangeKeyDelete removes all range keys within the
     769             : // bounds, including those with or without suffixes.
     770             : //
     771             : // It is safe to modify the contents of the arguments after RangeKeyDelete
     772             : // returns.
     773           2 : func (d *DB) RangeKeyDelete(start, end []byte, opts *WriteOptions) error {
     774           2 :         b := newBatch(d)
     775           2 :         _ = b.RangeKeyDelete(start, end, opts)
     776           2 :         if err := d.Apply(b, opts); err != nil {
     777           0 :                 return err
     778           0 :         }
     779             :         // Only release the batch on success.
     780           2 :         return b.Close()
     781             : }
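
A short sketch tying together the three range-key write operations above, using
an unsuffixed range key so it works with any Comparer. The spans are
illustrative, and db is assumed to be an open *pebble.DB.

        // rangeKeySketch marks [a, z) with a range key, trims it back to [a, m),
        // and finally removes what remains. Point keys in the span are untouched.
        func rangeKeySketch(db *pebble.DB) error {
                // Cover [a, z) with an unsuffixed range key carrying value "v".
                if err := db.RangeKeySet([]byte("a"), []byte("z"), nil, []byte("v"), pebble.NoSync); err != nil {
                        return err
                }
                // Unset [m, z); only range keys with a matching (here: empty)
                // suffix are affected, leaving the range key over [a, m).
                if err := db.RangeKeyUnset([]byte("m"), []byte("z"), nil, pebble.NoSync); err != nil {
                        return err
                }
                // RangeKeyDelete removes every range key in [a, m), suffixed or
                // not; point keys would require DeleteRange instead.
                return db.RangeKeyDelete([]byte("a"), []byte("m"), pebble.NoSync)
        }
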
     782             : 
     783             : // Apply the operations contained in the batch to the DB. If the batch is large
     784             : // the contents of the batch may be retained by the database. If that occurs
     785             : // the batch contents will be cleared preventing the caller from attempting to
     786             : // reuse them.
     787             : //
     788             : // It is safe to modify the contents of the arguments after Apply returns.
     789             : //
     790             : // Apply returns ErrInvalidBatch if the provided batch is invalid in any way.
     791           2 : func (d *DB) Apply(batch *Batch, opts *WriteOptions) error {
     792           2 :         return d.applyInternal(batch, opts, false)
     793           2 : }
     794             : 
     795             : // ApplyNoSyncWait must only be used when opts.Sync is true and the caller
     796             : // does not want to wait for the WAL fsync to happen. The method will return
     797             : // once the mutation is applied to the memtable and is visible (note that a
     798             : // mutation is visible before the WAL sync even in the wait case, so we have
     799             : // not weakened the durability semantics). The caller must call Batch.SyncWait
     800             : // to wait for the WAL fsync. The caller must not Close the batch without
     801             : // first calling Batch.SyncWait.
     802             : //
     803             : // RECOMMENDATION: Prefer using Apply unless you really understand why you
     804             : // need ApplyNoSyncWait.
     805             : // EXPERIMENTAL: API/feature subject to change. Do not yet use outside
     806             : // CockroachDB.
     807           2 : func (d *DB) ApplyNoSyncWait(batch *Batch, opts *WriteOptions) error {
     808           2 :         if !opts.Sync {
      809           0 :                 return errors.Errorf("cannot request asynchronous apply when WriteOptions.Sync is false")
     810           0 :         }
     811           2 :         return d.applyInternal(batch, opts, true)
     812             : }
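
A sketch of the sequencing ApplyNoSyncWait requires, per the comment above: the
mutation is visible once ApplyNoSyncWait returns, other work can overlap the
WAL fsync, and Batch.SyncWait must be called before the batch is closed.
Assumes db is an open *pebble.DB; the key is illustrative.

        // applyNoSyncWaitSketch applies a batch without waiting for the WAL
        // fsync, overlaps other work with it, then waits before releasing the batch.
        func applyNoSyncWaitSketch(db *pebble.DB) error {
                b := db.NewBatch()
                if err := b.Set([]byte("k"), []byte("v"), nil); err != nil {
                        return err
                }
                // opts.Sync must be true for ApplyNoSyncWait. The write is visible
                // to readers when this returns, but not yet known to be durable.
                if err := db.ApplyNoSyncWait(b, pebble.Sync); err != nil {
                        return err
                }

                // ... overlap other work with the WAL fsync here ...

                // Wait for durability; the batch must not be closed before this.
                if err := b.SyncWait(); err != nil {
                        return err
                }
                return b.Close()
        }
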
     813             : 
     814             : // REQUIRES: noSyncWait => opts.Sync
     815           2 : func (d *DB) applyInternal(batch *Batch, opts *WriteOptions, noSyncWait bool) error {
     816           2 :         if err := d.closed.Load(); err != nil {
     817           1 :                 panic(err)
     818             :         }
     819           2 :         if batch.committing {
     820           0 :                 panic("pebble: batch already committing")
     821             :         }
     822           2 :         if batch.applied.Load() {
     823           0 :                 panic("pebble: batch already applied")
     824             :         }
     825           2 :         if d.opts.ReadOnly {
     826           1 :                 return ErrReadOnly
     827           1 :         }
     828           2 :         if batch.db != nil && batch.db != d {
     829           1 :                 panic(fmt.Sprintf("pebble: batch db mismatch: %p != %p", batch.db, d))
     830             :         }
     831             : 
     832           2 :         sync := opts.GetSync()
     833           2 :         if sync && d.opts.DisableWAL {
     834           0 :                 return errors.New("pebble: WAL disabled")
     835           0 :         }
     836             : 
     837           2 :         if fmv := d.FormatMajorVersion(); fmv < batch.minimumFormatMajorVersion {
     838           0 :                 panic(fmt.Sprintf(
     839           0 :                         "pebble: batch requires at least format major version %d (current: %d)",
     840           0 :                         batch.minimumFormatMajorVersion, fmv,
     841           0 :                 ))
     842             :         }
     843             : 
     844           2 :         if batch.countRangeKeys > 0 {
     845           2 :                 if d.split == nil {
     846           0 :                         return errNoSplit
     847           0 :                 }
     848             :         }
     849           2 :         batch.committing = true
     850           2 : 
     851           2 :         if batch.db == nil {
     852           1 :                 if err := batch.refreshMemTableSize(); err != nil {
     853           0 :                         return err
     854           0 :                 }
     855             :         }
     856           2 :         if batch.memTableSize >= d.largeBatchThreshold {
     857           2 :                 var err error
     858           2 :                 batch.flushable, err = newFlushableBatch(batch, d.opts.Comparer)
     859           2 :                 if err != nil {
     860           0 :                         return err
     861           0 :                 }
     862             :         }
     863           2 :         if err := d.commit.Commit(batch, sync, noSyncWait); err != nil {
     864           0 :                 // There isn't much we can do on an error here. The commit pipeline will be
     865           0 :                 // horked at this point.
     866           0 :                 d.opts.Logger.Fatalf("pebble: fatal commit error: %v", err)
     867           0 :         }
     868             :         // If this is a large batch, we need to clear the batch contents as the
     869             :         // flushable batch may still be present in the flushables queue.
     870             :         //
     871             :         // TODO(peter): Currently large batches are written to the WAL. We could
     872             :         // skip the WAL write and instead wait for the large batch to be flushed to
     873             :         // an sstable. For a 100 MB batch, this might actually be faster. For a 1
     874             :         // GB batch this is almost certainly faster.
     875           2 :         if batch.flushable != nil {
     876           2 :                 batch.data = nil
     877           2 :         }
     878           2 :         return nil
     879             : }
     880             : 
     881           2 : func (d *DB) commitApply(b *Batch, mem *memTable) error {
     882           2 :         if b.flushable != nil {
     883           2 :                 // This is a large batch which was already added to the immutable queue.
     884           2 :                 return nil
     885           2 :         }
     886           2 :         err := mem.apply(b, b.SeqNum())
     887           2 :         if err != nil {
     888           0 :                 return err
     889           0 :         }
     890             : 
     891             :         // If the batch contains range tombstones and the database is configured
     892             :         // to flush range deletions, schedule a delayed flush so that disk space
     893             :         // may be reclaimed without additional writes or an explicit flush.
     894           2 :         if b.countRangeDels > 0 && d.opts.FlushDelayDeleteRange > 0 {
     895           2 :                 d.mu.Lock()
     896           2 :                 d.maybeScheduleDelayedFlush(mem, d.opts.FlushDelayDeleteRange)
     897           2 :                 d.mu.Unlock()
     898           2 :         }
     899             : 
     900             :         // If the batch contains range keys and the database is configured to flush
     901             :         // range keys, schedule a delayed flush so that the range keys are cleared
     902             :         // from the memtable.
     903           2 :         if b.countRangeKeys > 0 && d.opts.FlushDelayRangeKey > 0 {
     904           2 :                 d.mu.Lock()
     905           2 :                 d.maybeScheduleDelayedFlush(mem, d.opts.FlushDelayRangeKey)
     906           2 :                 d.mu.Unlock()
     907           2 :         }
     908             : 
     909           2 :         if mem.writerUnref() {
     910           2 :                 d.mu.Lock()
     911           2 :                 d.maybeScheduleFlush()
     912           2 :                 d.mu.Unlock()
     913           2 :         }
     914           2 :         return nil
     915             : }
     916             : 
     917           2 : func (d *DB) commitWrite(b *Batch, syncWG *sync.WaitGroup, syncErr *error) (*memTable, error) {
     918           2 :         var size int64
     919           2 :         repr := b.Repr()
     920           2 : 
     921           2 :         if b.flushable != nil {
     922           2 :                 // We have a large batch. Such batches are special in that they don't get
     923           2 :                 // added to the memtable, and are instead inserted into the queue of
     924           2 :                 // memtables. The call to makeRoomForWrite with this batch will force the
     925           2 :                 // current memtable to be flushed. We want the large batch to be part of
     926           2 :                 // the same log, so we add it to the WAL here, rather than after the call
     927           2 :                 // to makeRoomForWrite().
     928           2 :                 //
     929           2 :                 // Set the sequence number since it was not set to the correct value earlier
     930           2 :                 // (see comment in newFlushableBatch()).
     931           2 :                 b.flushable.setSeqNum(b.SeqNum())
     932           2 :                 if !d.opts.DisableWAL {
     933           2 :                         var err error
     934           2 :                         size, err = d.mu.log.writer.WriteRecord(repr, wal.SyncOptions{Done: syncWG, Err: syncErr}, b)
     935           2 :                         if err != nil {
     936           0 :                                 panic(err)
     937             :                         }
     938             :                 }
     939             :         }
     940             : 
     941           2 :         var err error
     942           2 :         // Grab a reference to the memtable. We don't hold DB.mu, but we do hold
     943           2 :         // d.commit.mu. It's okay for readers of d.mu.mem.mutable to only hold one of
     944           2 :         // d.commit.mu or d.mu, because memtable rotations require holding both.
     945           2 :         mem := d.mu.mem.mutable
     946           2 :         // Batches which contain keys of kind InternalKeyKindIngestSST will
     947           2 :         // never be applied to the memtable, so we don't need to make room for
     948           2 :         // write.
     949           2 :         if !b.ingestedSSTBatch {
     950           2 :                 // Flushable batches will require a rotation of the memtable regardless,
     951           2 :                 // so only attempt an optimistic reservation of space in the current
     952           2 :                 // memtable if this batch is not a large flushable batch.
     953           2 :                 if b.flushable == nil {
     954           2 :                         err = d.mu.mem.mutable.prepare(b)
     955           2 :                 }
     956           2 :                 if b.flushable != nil || err == arenaskl.ErrArenaFull {
     957           2 :                         // Slow path.
     958           2 :                         // We need to acquire DB.mu and rotate the memtable.
     959           2 :                         func() {
     960           2 :                                 d.mu.Lock()
     961           2 :                                 defer d.mu.Unlock()
     962           2 :                                 err = d.makeRoomForWrite(b)
     963           2 :                                 mem = d.mu.mem.mutable
     964           2 :                         }()
     965             :                 }
     966             :         }
     967           2 :         if err != nil {
     968           0 :                 return nil, err
     969           0 :         }
     970           2 :         if d.opts.DisableWAL {
     971           2 :                 return mem, nil
     972           2 :         }
     973           2 :         d.logBytesIn.Add(uint64(len(repr)))
     974           2 : 
     975           2 :         if b.flushable == nil {
     976           2 :                 size, err = d.mu.log.writer.WriteRecord(repr, wal.SyncOptions{Done: syncWG, Err: syncErr}, b)
     977           2 :                 if err != nil {
     978           0 :                         panic(err)
     979             :                 }
     980             :         }
     981             : 
     982           2 :         d.logSize.Store(uint64(size))
     983           2 :         return mem, err
     984             : }
     985             : 
     986             : type iterAlloc struct {
     987             :         dbi                 Iterator
     988             :         keyBuf              []byte
     989             :         boundsBuf           [2][]byte
     990             :         prefixOrFullSeekKey []byte
     991             :         merging             mergingIter
     992             :         mlevels             [3 + numLevels]mergingIterLevel
     993             :         levels              [3 + numLevels]levelIter
     994             :         levelsPositioned    [3 + numLevels]bool
     995             : }
     996             : 
     997             : var iterAllocPool = sync.Pool{
     998           2 :         New: func() interface{} {
     999           2 :                 return &iterAlloc{}
    1000           2 :         },
    1001             : }
    1002             : 
    1003             : // snapshotIterOpts denotes snapshot-related iterator options when calling
    1004             : // newIter. These are the possible cases for a snapshotIterOpts:
    1005             : //   - No snapshot: All fields are zero values.
    1006             : //   - Classic snapshot: Only `seqNum` is set. The latest readState will be used
    1007             : //     and the specified seqNum will be used as the snapshot seqNum.
    1008             : //   - EventuallyFileOnlySnapshot (EFOS) behaving as a classic snapshot. Only
    1009             : //     the `seqNum` is set. The latest readState will be used
    1010             : //     and the specified seqNum will be used as the snapshot seqNum.
    1011             : //   - EFOS in file-only state: Only `seqNum` and `vers` are set. All the
    1012             : //     relevant SSTs are referenced by the *version.
    1013             : //   - EFOS that has been excised but is in alwaysCreateIters mode (tests only).
    1014             : //     Only `seqNum` and `readState` are set.
    1015             : type snapshotIterOpts struct {
    1016             :         seqNum    uint64
    1017             :         vers      *version
    1018             :         readState *readState
    1019             : }
    1020             : 
    1021             : type batchIterOpts struct {
    1022             :         batchOnly bool
    1023             : }
    1024             : type newIterOpts struct {
    1025             :         snapshot snapshotIterOpts
    1026             :         batch    batchIterOpts
    1027             : }
    1028             : 
    1029             : // newIter constructs a new iterator, merging in batch iterators as an extra
    1030             : // level.
    1031             : func (d *DB) newIter(
    1032             :         ctx context.Context, batch *Batch, internalOpts newIterOpts, o *IterOptions,
    1033           2 : ) *Iterator {
    1034           2 :         if internalOpts.batch.batchOnly {
    1035           1 :                 if batch == nil {
    1036           0 :                         panic("batchOnly is true, but batch is nil")
    1037             :                 }
    1038           1 :                 if internalOpts.snapshot.vers != nil {
    1039           0 :                         panic("batchOnly is true, but snapshotIterOpts is initialized")
    1040             :                 }
    1041             :         }
    1042           2 :         if err := d.closed.Load(); err != nil {
    1043           1 :                 panic(err)
    1044             :         }
    1045           2 :         seqNum := internalOpts.snapshot.seqNum
    1046           2 :         if o != nil && o.RangeKeyMasking.Suffix != nil && o.KeyTypes != IterKeyTypePointsAndRanges {
    1047           0 :                 panic("pebble: range key masking requires IterKeyTypePointsAndRanges")
    1048             :         }
    1049           2 :         if (batch != nil || seqNum != 0) && (o != nil && o.OnlyReadGuaranteedDurable) {
    1050           1 :                 // We could add support for OnlyReadGuaranteedDurable on snapshots if
    1051           1 :                 // there was a need: this would require checking that the sequence number
    1052           1 :                 // of the snapshot has been flushed, by comparing with
    1053           1 :                 // DB.mem.queue[0].logSeqNum.
    1054           1 :                 panic("OnlyReadGuaranteedDurable is not supported for batches or snapshots")
    1055             :         }
    1056           2 :         var readState *readState
    1057           2 :         var newIters tableNewIters
    1058           2 :         var newIterRangeKey keyspanimpl.TableNewSpanIter
    1059           2 :         if !internalOpts.batch.batchOnly {
    1060           2 :                 // Grab and reference the current readState. This prevents the underlying
    1061           2 :                 // files in the associated version from being deleted if there is a current
    1062           2 :                 // compaction. The readState is unref'd by Iterator.Close().
    1063           2 :                 if internalOpts.snapshot.vers == nil {
    1064           2 :                         if internalOpts.snapshot.readState != nil {
    1065           0 :                                 readState = internalOpts.snapshot.readState
    1066           0 :                                 readState.ref()
    1067           2 :                         } else {
    1068           2 :                                 // NB: loadReadState() calls readState.ref().
    1069           2 :                                 readState = d.loadReadState()
    1070           2 :                         }
    1071           2 :                 } else {
    1072           2 :                         // vers != nil
    1073           2 :                         internalOpts.snapshot.vers.Ref()
    1074           2 :                 }
    1075             : 
    1076             :                 // Determine the seqnum to read at after grabbing the read state (current and
    1077             :                 // memtables) above.
    1078           2 :                 if seqNum == 0 {
    1079           2 :                         seqNum = d.mu.versions.visibleSeqNum.Load()
    1080           2 :                 }
    1081           2 :                 newIters = d.newIters
    1082           2 :                 newIterRangeKey = d.tableNewRangeKeyIter
    1083             :         }
    1084             : 
    1085             :         // Bundle various structures under a single umbrella in order to allocate
    1086             :         // them together.
    1087           2 :         buf := iterAllocPool.Get().(*iterAlloc)
    1088           2 :         dbi := &buf.dbi
    1089           2 :         *dbi = Iterator{
    1090           2 :                 ctx:                 ctx,
    1091           2 :                 alloc:               buf,
    1092           2 :                 merge:               d.merge,
    1093           2 :                 comparer:            *d.opts.Comparer,
    1094           2 :                 readState:           readState,
    1095           2 :                 version:             internalOpts.snapshot.vers,
    1096           2 :                 keyBuf:              buf.keyBuf,
    1097           2 :                 prefixOrFullSeekKey: buf.prefixOrFullSeekKey,
    1098           2 :                 boundsBuf:           buf.boundsBuf,
    1099           2 :                 batch:               batch,
    1100           2 :                 newIters:            newIters,
    1101           2 :                 newIterRangeKey:     newIterRangeKey,
    1102           2 :                 seqNum:              seqNum,
    1103           2 :                 batchOnlyIter:       internalOpts.batch.batchOnly,
    1104           2 :         }
    1105           2 :         if o != nil {
    1106           2 :                 dbi.opts = *o
    1107           2 :                 dbi.processBounds(o.LowerBound, o.UpperBound)
    1108           2 :         }
    1109           2 :         dbi.opts.logger = d.opts.Logger
    1110           2 :         if d.opts.private.disableLazyCombinedIteration {
    1111           1 :                 dbi.opts.disableLazyCombinedIteration = true
    1112           1 :         }
    1113           2 :         if batch != nil {
    1114           2 :                 dbi.batchSeqNum = dbi.batch.nextSeqNum()
    1115           2 :         }
    1116           2 :         return finishInitializingIter(ctx, buf)
    1117             : }
    1118             : 
    1119             : // finishInitializingIter is a helper for doing the non-trivial initialization
    1120             : // of an Iterator. It's invoked to perform the initial initialization of an
    1121             : // Iterator during NewIter or Clone, and to perform reinitialization due to a
    1122             : // change in IterOptions by a call to Iterator.SetOptions.
    1123           2 : func finishInitializingIter(ctx context.Context, buf *iterAlloc) *Iterator {
    1124           2 :         // Short-hand.
    1125           2 :         dbi := &buf.dbi
    1126           2 :         var memtables flushableList
    1127           2 :         if dbi.readState != nil {
    1128           2 :                 memtables = dbi.readState.memtables
    1129           2 :         }
    1130           2 :         if dbi.opts.OnlyReadGuaranteedDurable {
    1131           1 :                 memtables = nil
    1132           2 :         } else {
    1133           2 :                 // We only need to read from memtables which contain sequence numbers older
    1134           2 :                 // than seqNum. Trim off newer memtables.
    1135           2 :                 for i := len(memtables) - 1; i >= 0; i-- {
    1136           2 :                         if logSeqNum := memtables[i].logSeqNum; logSeqNum < dbi.seqNum {
    1137           2 :                                 break
    1138             :                         }
    1139           2 :                         memtables = memtables[:i]
    1140             :                 }
    1141             :         }
    1142             : 
    1143           2 :         if dbi.opts.pointKeys() {
    1144           2 :                 // Construct the point iterator, initializing dbi.pointIter to point to
    1145           2 :                 // dbi.merging. If this is called during a SetOptions call and this
    1146           2 :                 // Iterator has already initialized dbi.merging, constructPointIter is a
    1147           2 :                 // noop and an initialized pointIter already exists in dbi.pointIter.
    1148           2 :                 dbi.constructPointIter(ctx, memtables, buf)
    1149           2 :                 dbi.iter = dbi.pointIter
    1150           2 :         } else {
    1151           2 :                 dbi.iter = emptyIter
    1152           2 :         }
    1153             : 
    1154           2 :         if dbi.opts.rangeKeys() {
    1155           2 :                 dbi.rangeKeyMasking.init(dbi, dbi.comparer.Compare, dbi.comparer.Split)
    1156           2 : 
    1157           2 :                 // When iterating over both point and range keys, don't create the
    1158           2 :                 // range-key iterator stack immediately if we can avoid it. This
    1159           2 :                 // optimization takes advantage of the expected sparseness of range
    1160           2 :                 // keys, and configures the point-key iterator to dynamically switch to
    1161           2 :                 // combined iteration when it observes a file containing range keys.
    1162           2 :                 //
    1163           2 :                 // Lazy combined iteration is not possible if a batch or a memtable
    1164           2 :                 // contains any range keys.
    1165           2 :                 useLazyCombinedIteration := dbi.rangeKey == nil &&
    1166           2 :                         dbi.opts.KeyTypes == IterKeyTypePointsAndRanges &&
    1167           2 :                         (dbi.batch == nil || dbi.batch.countRangeKeys == 0) &&
    1168           2 :                         !dbi.opts.disableLazyCombinedIteration
    1169           2 :                 if useLazyCombinedIteration {
    1170           2 :                         // The user requested combined iteration, and there's no indexed
    1171           2 :                         // batch currently containing range keys that would prevent lazy
    1172           2 :                         // combined iteration. Check the memtables to see if they contain
    1173           2 :                         // any range keys.
    1174           2 :                         for i := range memtables {
    1175           2 :                                 if memtables[i].containsRangeKeys() {
    1176           2 :                                         useLazyCombinedIteration = false
    1177           2 :                                         break
    1178             :                                 }
    1179             :                         }
    1180             :                 }
    1181             : 
    1182           2 :                 if useLazyCombinedIteration {
    1183           2 :                         dbi.lazyCombinedIter = lazyCombinedIter{
    1184           2 :                                 parent:    dbi,
    1185           2 :                                 pointIter: dbi.pointIter,
    1186           2 :                                 combinedIterState: combinedIterState{
    1187           2 :                                         initialized: false,
    1188           2 :                                 },
    1189           2 :                         }
    1190           2 :                         dbi.iter = &dbi.lazyCombinedIter
    1191           2 :                         dbi.iter = invalidating.MaybeWrapIfInvariants(dbi.iter)
    1192           2 :                 } else {
    1193           2 :                         dbi.lazyCombinedIter.combinedIterState = combinedIterState{
    1194           2 :                                 initialized: true,
    1195           2 :                         }
    1196           2 :                         if dbi.rangeKey == nil {
    1197           2 :                                 dbi.rangeKey = iterRangeKeyStateAllocPool.Get().(*iteratorRangeKeyState)
    1198           2 :                                 dbi.rangeKey.init(dbi.comparer.Compare, dbi.comparer.Split, &dbi.opts)
    1199           2 :                                 dbi.constructRangeKeyIter()
    1200           2 :                         } else {
    1201           2 :                                 dbi.rangeKey.iterConfig.SetBounds(dbi.opts.LowerBound, dbi.opts.UpperBound)
    1202           2 :                         }
    1203             : 
    1204             :                         // Wrap the point iterator (currently dbi.iter) with an interleaving
    1205             :                         // iterator that interleaves range keys pulled from
    1206             :                         // dbi.rangeKey.rangeKeyIter.
    1207             :                         //
    1208             :                         // NB: The interleaving iterator is always reinitialized, even if
    1209             :                         // dbi already had an initialized range key iterator, in case the point
    1210             :                         // iterator changed or the range key masking suffix changed.
    1211           2 :                         dbi.rangeKey.iiter.Init(&dbi.comparer, dbi.iter, dbi.rangeKey.rangeKeyIter,
    1212           2 :                                 keyspan.InterleavingIterOpts{
    1213           2 :                                         Mask:       &dbi.rangeKeyMasking,
    1214           2 :                                         LowerBound: dbi.opts.LowerBound,
    1215           2 :                                         UpperBound: dbi.opts.UpperBound,
    1216           2 :                                 })
    1217           2 :                         dbi.iter = &dbi.rangeKey.iiter
    1218             :                 }
    1219           2 :         } else {
    1220           2 :                 // !dbi.opts.rangeKeys()
    1221           2 :                 //
    1222           2 :                 // Reset the combined iterator state. The initialized=true ensures the
    1223           2 :                 // iterator doesn't unnecessarily try to switch to combined iteration.
    1224           2 :                 dbi.lazyCombinedIter.combinedIterState = combinedIterState{initialized: true}
    1225           2 :         }
    1226           2 :         return dbi
    1227             : }
    1228             : 
    1229             : // ScanInternal scans all internal keys within the specified bounds, truncating
    1230             : // any rangedels and rangekeys to those bounds if they span past them. For use
    1231             : // when an external user needs to be aware of all internal keys that make up a
    1232             : // key range.
    1233             : //
    1234             : // Keys deleted by range deletions must not be returned or exposed by this
    1235             : // method, while the range deletion deleting that key must be exposed using
    1236             : // visitRangeDel. Keys that would be masked by range key masking (if an
    1237             : // appropriate prefix were set) should be exposed, alongside the range key
    1238             : // that would have masked it. This method also collapses all point keys into
     1239             : // one InternalKey, so at most one internal key per user key is returned
    1240             : // to visitPointKey.
    1241             : //
    1242             : // If visitSharedFile is not nil, ScanInternal iterates in skip-shared iteration
    1243             : // mode. In this iteration mode, sstables in levels L5 and L6 are skipped, and
     1244             : // their metadata truncated to [lower, upper) and passed into visitSharedFile.
    1245             : // ErrInvalidSkipSharedIteration is returned if visitSharedFile is not nil and an
    1246             : // sstable in L5 or L6 is found that is not in shared storage according to
    1247             : // provider.IsShared, or an sstable in those levels contains a newer key than the
     1248             : // snapshot sequence number (only applicable for snapshot.ScanInternal). An
     1249             : // example of when this could happen is if Pebble started writing sstables before
     1250             : // a creator ID was set (as creator IDs are necessary to enable shared storage),
     1251             : // resulting in some lower-level SSTs being on non-shared storage. Skip-shared
    1252             : // iteration is invalid in those cases.
    1253             : func (d *DB) ScanInternal(
    1254             :         ctx context.Context,
    1255             :         categoryAndQoS sstable.CategoryAndQoS,
    1256             :         lower, upper []byte,
    1257             :         visitPointKey func(key *InternalKey, value LazyValue, iterInfo IteratorLevel) error,
    1258             :         visitRangeDel func(start, end []byte, seqNum uint64) error,
    1259             :         visitRangeKey func(start, end []byte, keys []rangekey.Key) error,
    1260             :         visitSharedFile func(sst *SharedSSTMeta) error,
    1261             :         visitExternalFile func(sst *ExternalFile) error,
    1262           2 : ) error {
    1263           2 :         scanInternalOpts := &scanInternalOptions{
    1264           2 :                 CategoryAndQoS:    categoryAndQoS,
    1265           2 :                 visitPointKey:     visitPointKey,
    1266           2 :                 visitRangeDel:     visitRangeDel,
    1267           2 :                 visitRangeKey:     visitRangeKey,
    1268           2 :                 visitSharedFile:   visitSharedFile,
    1269           2 :                 visitExternalFile: visitExternalFile,
    1270           2 :                 IterOptions: IterOptions{
    1271           2 :                         KeyTypes:   IterKeyTypePointsAndRanges,
    1272           2 :                         LowerBound: lower,
    1273           2 :                         UpperBound: upper,
    1274           2 :                 },
    1275           2 :         }
    1276           2 :         iter, err := d.newInternalIter(ctx, snapshotIterOpts{} /* snapshot */, scanInternalOpts)
    1277           2 :         if err != nil {
    1278           0 :                 return err
    1279           0 :         }
    1280           2 :         defer iter.close()
    1281           2 :         return scanInternalImpl(ctx, lower, upper, iter, scanInternalOpts)
    1282             : }
    1283             : 
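                      : // A sketch of a ScanInternal call with no-op visitors, assuming an open
                      : // *pebble.DB named db, a ctx, and illustrative bounds; passing nil for
                      : // visitSharedFile leaves skip-shared iteration disabled:
                      : //
                      : //      err := db.ScanInternal(ctx, sstable.CategoryAndQoS{},
                      : //              []byte("a"), []byte("z"),
                      : //              func(key *pebble.InternalKey, value pebble.LazyValue, info pebble.IteratorLevel) error {
                      : //                      return nil // one collapsed point key per user key
                      : //              },
                      : //              func(start, end []byte, seqNum uint64) error { return nil },       // visitRangeDel
                      : //              func(start, end []byte, keys []rangekey.Key) error { return nil }, // visitRangeKey
                      : //              nil, // visitSharedFile
                      : //              nil, // visitExternalFile
                      : //      )
                      : 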
    1284             : // newInternalIter constructs and returns a new scanInternalIterator on this db.
    1285             : // If o.skipSharedLevels is true, levels below sharedLevelsStart are *not* added
    1286             : // to the internal iterator.
    1287             : //
    1288             : // TODO(bilal): This method has a lot of similarities with db.newIter as well as
    1289             : // finishInitializingIter. Both pairs of methods should be refactored to reduce
    1290             : // this duplication.
    1291             : func (d *DB) newInternalIter(
    1292             :         ctx context.Context, sOpts snapshotIterOpts, o *scanInternalOptions,
    1293           2 : ) (*scanInternalIterator, error) {
    1294           2 :         if err := d.closed.Load(); err != nil {
    1295           0 :                 panic(err)
    1296             :         }
    1297             :         // Grab and reference the current readState. This prevents the underlying
    1298             :         // files in the associated version from being deleted if there is a current
    1299             :         // compaction. The readState is unref'd by Iterator.Close().
    1300           2 :         var readState *readState
    1301           2 :         if sOpts.vers == nil {
    1302           2 :                 if sOpts.readState != nil {
    1303           0 :                         readState = sOpts.readState
    1304           0 :                         readState.ref()
    1305           2 :                 } else {
    1306           2 :                         readState = d.loadReadState()
    1307           2 :                 }
    1308             :         }
    1309           2 :         if sOpts.vers != nil {
    1310           1 :                 sOpts.vers.Ref()
    1311           1 :         }
    1312             : 
    1313             :         // Determine the seqnum to read at after grabbing the read state (current and
    1314             :         // memtables) above.
    1315           2 :         seqNum := sOpts.seqNum
    1316           2 :         if seqNum == 0 {
    1317           2 :                 seqNum = d.mu.versions.visibleSeqNum.Load()
    1318           2 :         }
    1319             : 
    1320             :         // Bundle various structures under a single umbrella in order to allocate
    1321             :         // them together.
    1322           2 :         buf := iterAllocPool.Get().(*iterAlloc)
    1323           2 :         dbi := &scanInternalIterator{
    1324           2 :                 ctx:             ctx,
    1325           2 :                 db:              d,
    1326           2 :                 comparer:        d.opts.Comparer,
    1327           2 :                 merge:           d.opts.Merger.Merge,
    1328           2 :                 readState:       readState,
    1329           2 :                 version:         sOpts.vers,
    1330           2 :                 alloc:           buf,
    1331           2 :                 newIters:        d.newIters,
    1332           2 :                 newIterRangeKey: d.tableNewRangeKeyIter,
    1333           2 :                 seqNum:          seqNum,
    1334           2 :                 mergingIter:     &buf.merging,
    1335           2 :         }
    1336           2 :         dbi.opts = *o
    1337           2 :         dbi.opts.logger = d.opts.Logger
    1338           2 :         if d.opts.private.disableLazyCombinedIteration {
    1339           1 :                 dbi.opts.disableLazyCombinedIteration = true
    1340           1 :         }
    1341           2 :         return finishInitializingInternalIter(buf, dbi)
    1342             : }
    1343             : 
    1344             : func finishInitializingInternalIter(
    1345             :         buf *iterAlloc, i *scanInternalIterator,
    1346           2 : ) (*scanInternalIterator, error) {
     1347           2 :         // Gather the memtables visible through the read state, if any.
    1348           2 :         var memtables flushableList
    1349           2 :         if i.readState != nil {
    1350           2 :                 memtables = i.readState.memtables
    1351           2 :         }
    1352             :         // We only need to read from memtables which contain sequence numbers older
    1353             :         // than seqNum. Trim off newer memtables.
    1354           2 :         for j := len(memtables) - 1; j >= 0; j-- {
    1355           2 :                 if logSeqNum := memtables[j].logSeqNum; logSeqNum < i.seqNum {
    1356           2 :                         break
    1357             :                 }
    1358           2 :                 memtables = memtables[:j]
    1359             :         }
    1360           2 :         i.initializeBoundBufs(i.opts.LowerBound, i.opts.UpperBound)
    1361           2 : 
    1362           2 :         if err := i.constructPointIter(i.opts.CategoryAndQoS, memtables, buf); err != nil {
    1363           0 :                 return nil, err
    1364           0 :         }
    1365             : 
    1366             :         // For internal iterators, we skip the lazy combined iteration optimization
    1367             :         // entirely, and create the range key iterator stack directly.
    1368           2 :         i.rangeKey = iterRangeKeyStateAllocPool.Get().(*iteratorRangeKeyState)
    1369           2 :         i.rangeKey.init(i.comparer.Compare, i.comparer.Split, &i.opts.IterOptions)
    1370           2 :         if err := i.constructRangeKeyIter(); err != nil {
    1371           0 :                 return nil, err
    1372           0 :         }
    1373             : 
    1374             :         // Wrap the point iterator (currently i.iter) with an interleaving
    1375             :         // iterator that interleaves range keys pulled from
    1376             :         // i.rangeKey.rangeKeyIter.
    1377           2 :         i.rangeKey.iiter.Init(i.comparer, i.iter, i.rangeKey.rangeKeyIter,
    1378           2 :                 keyspan.InterleavingIterOpts{
    1379           2 :                         LowerBound: i.opts.LowerBound,
    1380           2 :                         UpperBound: i.opts.UpperBound,
    1381           2 :                 })
    1382           2 :         i.iter = &i.rangeKey.iiter
    1383           2 : 
    1384           2 :         return i, nil
    1385             : }
    1386             : 
    1387             : func (i *Iterator) constructPointIter(
    1388             :         ctx context.Context, memtables flushableList, buf *iterAlloc,
    1389           2 : ) {
    1390           2 :         if i.pointIter != nil {
    1391           2 :                 // Already have one.
    1392           2 :                 return
    1393           2 :         }
    1394           2 :         internalOpts := internalIterOpts{stats: &i.stats.InternalStats}
    1395           2 :         if i.opts.RangeKeyMasking.Filter != nil {
    1396           2 :                 internalOpts.boundLimitedFilter = &i.rangeKeyMasking
    1397           2 :         }
    1398             : 
    1399             :         // Merging levels and levels from iterAlloc.
    1400           2 :         mlevels := buf.mlevels[:0]
    1401           2 :         levels := buf.levels[:0]
    1402           2 : 
    1403           2 :         // We compute the number of levels needed ahead of time and reallocate a slice if
    1404           2 :         // the array from the iterAlloc isn't large enough. Doing this allocation once
    1405           2 :         // should improve the performance.
    1406           2 :         numMergingLevels := 0
    1407           2 :         numLevelIters := 0
    1408           2 :         if i.batch != nil {
    1409           2 :                 numMergingLevels++
    1410           2 :         }
    1411             : 
    1412           2 :         var current *version
    1413           2 :         if !i.batchOnlyIter {
    1414           2 :                 numMergingLevels += len(memtables)
    1415           2 : 
    1416           2 :                 current = i.version
    1417           2 :                 if current == nil {
    1418           2 :                         current = i.readState.current
    1419           2 :                 }
    1420           2 :                 numMergingLevels += len(current.L0SublevelFiles)
    1421           2 :                 numLevelIters += len(current.L0SublevelFiles)
    1422           2 :                 for level := 1; level < len(current.Levels); level++ {
    1423           2 :                         if current.Levels[level].Empty() {
    1424           2 :                                 continue
    1425             :                         }
    1426           2 :                         numMergingLevels++
    1427           2 :                         numLevelIters++
    1428             :                 }
    1429             :         }
    1430             : 
    1431           2 :         if numMergingLevels > cap(mlevels) {
    1432           2 :                 mlevels = make([]mergingIterLevel, 0, numMergingLevels)
    1433           2 :         }
    1434           2 :         if numLevelIters > cap(levels) {
    1435           2 :                 levels = make([]levelIter, 0, numLevelIters)
    1436           2 :         }
    1437             : 
    1438             :         // Top-level is the batch, if any.
    1439           2 :         if i.batch != nil {
    1440           2 :                 if i.batch.index == nil {
    1441           0 :                         // This isn't an indexed batch. We shouldn't have gotten this far.
    1442           0 :                         panic(errors.AssertionFailedf("creating an iterator over an unindexed batch"))
    1443           2 :                 } else {
    1444           2 :                         i.batch.initInternalIter(&i.opts, &i.batchPointIter)
    1445           2 :                         i.batch.initRangeDelIter(&i.opts, &i.batchRangeDelIter, i.batchSeqNum)
    1446           2 :                         // Only include the batch's rangedel iterator if it's non-empty.
    1447           2 :                         // This requires some subtle logic in the case a rangedel is later
    1448           2 :                         // written to the batch and the view of the batch is refreshed
    1449           2 :                         // during a call to SetOptions—in this case, we need to reconstruct
    1450           2 :                         // the point iterator to add the batch rangedel iterator.
    1451           2 :                         var rangeDelIter keyspan.FragmentIterator
    1452           2 :                         if i.batchRangeDelIter.Count() > 0 {
    1453           1 :                                 rangeDelIter = &i.batchRangeDelIter
    1454           1 :                         }
    1455           2 :                         mlevels = append(mlevels, mergingIterLevel{
    1456           2 :                                 iter:         &i.batchPointIter,
    1457           2 :                                 rangeDelIter: rangeDelIter,
    1458           2 :                         })
    1459             :                 }
    1460             :         }
    1461             : 
    1462           2 :         if !i.batchOnlyIter {
    1463           2 :                 // Next are the memtables.
    1464           2 :                 for j := len(memtables) - 1; j >= 0; j-- {
    1465           2 :                         mem := memtables[j]
    1466           2 :                         mlevels = append(mlevels, mergingIterLevel{
    1467           2 :                                 iter:         mem.newIter(&i.opts),
    1468           2 :                                 rangeDelIter: mem.newRangeDelIter(&i.opts),
    1469           2 :                         })
    1470           2 :                 }
    1471             : 
    1472             :                 // Next are the file levels: L0 sub-levels followed by lower levels.
    1473           2 :                 mlevelsIndex := len(mlevels)
    1474           2 :                 levelsIndex := len(levels)
    1475           2 :                 mlevels = mlevels[:numMergingLevels]
    1476           2 :                 levels = levels[:numLevelIters]
    1477           2 :                 i.opts.snapshotForHideObsoletePoints = buf.dbi.seqNum
    1478           2 :                 addLevelIterForFiles := func(files manifest.LevelIterator, level manifest.Level) {
    1479           2 :                         li := &levels[levelsIndex]
    1480           2 : 
    1481           2 :                         li.init(ctx, i.opts, &i.comparer, i.newIters, files, level, internalOpts)
    1482           2 :                         li.initRangeDel(&mlevels[mlevelsIndex].rangeDelIter)
    1483           2 :                         li.initCombinedIterState(&i.lazyCombinedIter.combinedIterState)
    1484           2 :                         mlevels[mlevelsIndex].levelIter = li
    1485           2 :                         mlevels[mlevelsIndex].iter = invalidating.MaybeWrapIfInvariants(li)
    1486           2 : 
    1487           2 :                         levelsIndex++
    1488           2 :                         mlevelsIndex++
    1489           2 :                 }
    1490             : 
    1491             :                 // Add level iterators for the L0 sublevels, iterating from newest to
    1492             :                 // oldest.
    1493           2 :                 for i := len(current.L0SublevelFiles) - 1; i >= 0; i-- {
    1494           2 :                         addLevelIterForFiles(current.L0SublevelFiles[i].Iter(), manifest.L0Sublevel(i))
    1495           2 :                 }
    1496             : 
    1497             :                 // Add level iterators for the non-empty non-L0 levels.
    1498           2 :                 for level := 1; level < len(current.Levels); level++ {
    1499           2 :                         if current.Levels[level].Empty() {
    1500           2 :                                 continue
    1501             :                         }
    1502           2 :                         addLevelIterForFiles(current.Levels[level].Iter(), manifest.Level(level))
    1503             :                 }
    1504             :         }
    1505           2 :         buf.merging.init(&i.opts, &i.stats.InternalStats, i.comparer.Compare, i.comparer.Split, mlevels...)
    1506           2 :         if len(mlevels) <= cap(buf.levelsPositioned) {
    1507           2 :                 buf.merging.levelsPositioned = buf.levelsPositioned[:len(mlevels)]
    1508           2 :         }
    1509           2 :         buf.merging.snapshot = i.seqNum
    1510           2 :         buf.merging.batchSnapshot = i.batchSeqNum
    1511           2 :         buf.merging.combinedIterState = &i.lazyCombinedIter.combinedIterState
    1512           2 :         i.pointIter = invalidating.MaybeWrapIfInvariants(&buf.merging).(topLevelIterator)
    1513           2 :         i.merging = &buf.merging
    1514             : }
    1515             : 
    1516             : // NewBatch returns a new empty write-only batch. Any reads on the batch will
    1517             : // return an error. If the batch is committed it will be applied to the DB.
    1518           2 : func (d *DB) NewBatch(opts ...BatchOption) *Batch {
    1519           2 :         return newBatch(d, opts...)
    1520           2 : }
    1521             : 
    1522             : // NewBatchWithSize is mostly identical to NewBatch, but it will allocate the
     1523             : // specified memory space for the internal slice in advance.
    1524           0 : func (d *DB) NewBatchWithSize(size int, opts ...BatchOption) *Batch {
    1525           0 :         return newBatchWithSize(d, size, opts...)
    1526           0 : }
    1527             : 
    1528             : // NewIndexedBatch returns a new empty read-write batch. Any reads on the batch
    1529             : // will read from both the batch and the DB. If the batch is committed it will
     1530             : // be applied to the DB. An indexed batch is slower than a non-indexed batch
    1531             : // for insert operations. If you do not need to perform reads on the batch, use
    1532             : // NewBatch instead.
    1533           2 : func (d *DB) NewIndexedBatch() *Batch {
    1534           2 :         return newIndexedBatch(d, d.opts.Comparer)
    1535           2 : }
    1536             : 
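                      : // A read-your-writes sketch with an indexed batch, assuming an open *pebble.DB
                      : // named db; use is a hypothetical helper:
                      : //
                      : //      b := db.NewIndexedBatch()
                      : //      _ = b.Set([]byte("k"), []byte("v"), nil)
                      : //      v, closer, err := b.Get([]byte("k")) // observes the uncommitted Set above
                      : //      if err == nil {
                      : //              use(v)
                      : //              _ = closer.Close()
                      : //      }
                      : //      if err := b.Commit(pebble.Sync); err != nil {
                      : //              return err
                      : //      }
                      : //      _ = b.Close()
                      : 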
    1537             : // NewIndexedBatchWithSize is mostly identical to NewIndexedBatch, but it will
    1538             : // allocate the specified memory space for the internal slice in advance.
    1539           0 : func (d *DB) NewIndexedBatchWithSize(size int) *Batch {
    1540           0 :         return newIndexedBatchWithSize(d, d.opts.Comparer, size)
    1541           0 : }
    1542             : 
    1543             : // NewIter returns an iterator that is unpositioned (Iterator.Valid() will
    1544             : // return false). The iterator can be positioned via a call to SeekGE, SeekLT,
    1545             : // First or Last. The iterator provides a point-in-time view of the current DB
    1546             : // state. This view is maintained by preventing file deletions and preventing
    1547             : // memtables referenced by the iterator from being deleted. Using an iterator
    1548             : // to maintain a long-lived point-in-time view of the DB state can lead to an
    1549             : // apparent memory and disk usage leak. Use snapshots (see NewSnapshot) for
     1550             : // point-in-time snapshots, which avoid these problems.
    1551           2 : func (d *DB) NewIter(o *IterOptions) (*Iterator, error) {
    1552           2 :         return d.NewIterWithContext(context.Background(), o)
    1553           2 : }
    1554             : 
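                      : // A bounded-scan sketch, assuming an open *pebble.DB named db; bounds are
                      : // illustrative:
                      : //
                      : //      iter, err := db.NewIter(&pebble.IterOptions{
                      : //              LowerBound: []byte("a"),
                      : //              UpperBound: []byte("z"),
                      : //      })
                      : //      if err != nil {
                      : //              return err
                      : //      }
                      : //      defer iter.Close() // releases the read state pinned by the iterator
                      : //      for iter.First(); iter.Valid(); iter.Next() {
                      : //              fmt.Printf("%s => %s\n", iter.Key(), iter.Value())
                      : //      }
                      : //      if err := iter.Error(); err != nil {
                      : //              return err
                      : //      }
                      : 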
    1555             : // NewIterWithContext is like NewIter, and additionally accepts a context for
    1556             : // tracing.
    1557           2 : func (d *DB) NewIterWithContext(ctx context.Context, o *IterOptions) (*Iterator, error) {
    1558           2 :         return d.newIter(ctx, nil /* batch */, newIterOpts{}, o), nil
    1559           2 : }
    1560             : 
    1561             : // NewSnapshot returns a point-in-time view of the current DB state. Iterators
    1562             : // created with this handle will all observe a stable snapshot of the current
    1563             : // DB state. The caller must call Snapshot.Close() when the snapshot is no
    1564             : // longer needed. Snapshots are not persisted across DB restarts (close ->
    1565             : // open). Unlike the implicit snapshot maintained by an iterator, a snapshot
    1566             : // will not prevent memtables from being released or sstables from being
    1567             : // deleted. Instead, a snapshot prevents deletion of sequence numbers
    1568             : // referenced by the snapshot.
    1569           2 : func (d *DB) NewSnapshot() *Snapshot {
    1570           2 :         if err := d.closed.Load(); err != nil {
    1571           1 :                 panic(err)
    1572             :         }
    1573             : 
    1574           2 :         d.mu.Lock()
    1575           2 :         s := &Snapshot{
    1576           2 :                 db:     d,
    1577           2 :                 seqNum: d.mu.versions.visibleSeqNum.Load(),
    1578           2 :         }
    1579           2 :         d.mu.snapshots.pushBack(s)
    1580           2 :         d.mu.Unlock()
    1581           2 :         return s
    1582             : }
    1583             : 
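                      : // A snapshot-read sketch, assuming an open *pebble.DB named db; use is a
                      : // hypothetical helper. Reads through the snapshot see only sequence numbers
                      : // that were visible when NewSnapshot returned:
                      : //
                      : //      s := db.NewSnapshot()
                      : //      defer s.Close()
                      : //      v, closer, err := s.Get([]byte("k"))
                      : //      if err == nil {
                      : //              use(v)
                      : //              _ = closer.Close()
                      : //      }
                      : 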
    1584             : // NewEventuallyFileOnlySnapshot returns a point-in-time view of the current DB
    1585             : // state, similar to NewSnapshot, but with consistency constrained to the
    1586             : // provided set of key ranges. See the comment at EventuallyFileOnlySnapshot for
    1587             : // its semantics.
    1588           2 : func (d *DB) NewEventuallyFileOnlySnapshot(keyRanges []KeyRange) *EventuallyFileOnlySnapshot {
    1589           2 :         if err := d.closed.Load(); err != nil {
    1590           0 :                 panic(err)
    1591             :         }
    1592           2 :         for i := range keyRanges {
    1593           2 :                 if i > 0 && d.cmp(keyRanges[i-1].End, keyRanges[i].Start) > 0 {
    1594           0 :                         panic("pebble: key ranges for eventually-file-only-snapshot not in order")
    1595             :                 }
    1596             :         }
    1597           2 :         return d.makeEventuallyFileOnlySnapshot(keyRanges)
    1598             : }
    1599             : 
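                      : // An EventuallyFileOnlySnapshot sketch, assuming an open *pebble.DB named db;
                      : // the single key range is illustrative and, per the check above, ranges must
                      : // be ordered:
                      : //
                      : //      efos := db.NewEventuallyFileOnlySnapshot([]pebble.KeyRange{
                      : //              {Start: []byte("a"), End: []byte("m")},
                      : //      })
                      : //      defer efos.Close()
                      : //      iter, err := efos.NewIter(&pebble.IterOptions{
                      : //              LowerBound: []byte("a"),
                      : //              UpperBound: []byte("m"),
                      : //      })
                      : //      if err != nil {
                      : //              return err
                      : //      }
                      : //      defer iter.Close()
                      : 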
    1600             : // Close closes the DB.
    1601             : //
    1602             : // It is not safe to close a DB until all outstanding iterators are closed
    1603             : // or to call Close concurrently with any other DB method. It is not valid
    1604             : // to call any of a DB's methods after the DB has been closed.
    1605           2 : func (d *DB) Close() error {
    1606           2 :         // Lock the commit pipeline for the duration of Close. This prevents a race
    1607           2 :         // with makeRoomForWrite. Rotating the WAL in makeRoomForWrite requires
    1608           2 :         // dropping d.mu several times for I/O. If Close only holds d.mu, an
    1609           2 :         // in-progress WAL rotation may re-acquire d.mu only once the database is
    1610           2 :         // closed.
    1611           2 :         //
    1612           2 :         // Additionally, locking the commit pipeline makes it more likely that
    1613           2 :         // (illegal) concurrent writes will observe d.closed.Load() != nil, creating
     1614           2 :         // more understandable panics if the database is improperly used concurrently
    1615           2 :         // during Close.
    1616           2 :         d.commit.mu.Lock()
    1617           2 :         defer d.commit.mu.Unlock()
    1618           2 :         d.mu.Lock()
    1619           2 :         defer d.mu.Unlock()
    1620           2 :         if err := d.closed.Load(); err != nil {
    1621           1 :                 panic(err)
    1622             :         }
    1623             : 
    1624             :         // Clear the finalizer that is used to check that an unreferenced DB has been
    1625             :         // closed. We're closing the DB here, so the check performed by that
    1626             :         // finalizer isn't necessary.
    1627             :         //
    1628             :         // Note: this is a no-op if invariants are disabled or race is enabled.
    1629           2 :         invariants.SetFinalizer(d.closed, nil)
    1630           2 : 
    1631           2 :         d.closed.Store(errors.WithStack(ErrClosed))
    1632           2 :         close(d.closedCh)
    1633           2 : 
    1634           2 :         defer d.opts.Cache.Unref()
    1635           2 : 
    1636           2 :         for d.mu.compact.compactingCount > 0 || d.mu.compact.downloadingCount > 0 || d.mu.compact.flushing {
    1637           2 :                 d.mu.compact.cond.Wait()
    1638           2 :         }
    1639           2 :         for d.mu.tableStats.loading {
    1640           2 :                 d.mu.tableStats.cond.Wait()
    1641           2 :         }
    1642           2 :         for d.mu.tableValidation.validating {
    1643           2 :                 d.mu.tableValidation.cond.Wait()
    1644           2 :         }
    1645             : 
    1646           2 :         var err error
    1647           2 :         if n := len(d.mu.compact.inProgress); n > 0 {
    1648           1 :                 err = errors.Errorf("pebble: %d unexpected in-progress compactions", errors.Safe(n))
    1649           1 :         }
    1650           2 :         err = firstError(err, d.mu.formatVers.marker.Close())
    1651           2 :         err = firstError(err, d.tableCache.close())
    1652           2 :         if !d.opts.ReadOnly {
    1653           2 :                 if d.mu.log.writer != nil {
    1654           2 :                         _, err2 := d.mu.log.writer.Close()
    1655           2 :                         err = firstError(err, err2)
    1656           2 :                 }
    1657           1 :         } else if d.mu.log.writer != nil {
    1658           0 :                 panic("pebble: log-writer should be nil in read-only mode")
    1659             :         }
    1660           2 :         err = firstError(err, d.mu.log.manager.Close())
    1661           2 :         err = firstError(err, d.fileLock.Close())
    1662           2 : 
    1663           2 :         // Note that versionSet.close() only closes the MANIFEST. The versions list
    1664           2 :         // is still valid for the checks below.
    1665           2 :         err = firstError(err, d.mu.versions.close())
    1666           2 : 
    1667           2 :         err = firstError(err, d.dataDir.Close())
    1668           2 : 
    1669           2 :         d.readState.val.unrefLocked()
    1670           2 : 
    1671           2 :         current := d.mu.versions.currentVersion()
    1672           2 :         for v := d.mu.versions.versions.Front(); true; v = v.Next() {
    1673           2 :                 refs := v.Refs()
    1674           2 :                 if v == current {
    1675           2 :                         if refs != 1 {
    1676           1 :                                 err = firstError(err, errors.Errorf("leaked iterators: current\n%s", v))
    1677           1 :                         }
    1678           2 :                         break
    1679             :                 }
    1680           0 :                 if refs != 0 {
    1681           0 :                         err = firstError(err, errors.Errorf("leaked iterators:\n%s", v))
    1682           0 :                 }
    1683             :         }
    1684             : 
    1685           2 :         for _, mem := range d.mu.mem.queue {
    1686           2 :                 // Usually, we'd want to delete the files returned by readerUnref. But
    1687           2 :                 // in this case, even if we're unreferencing the flushables, the
    1688           2 :                 // flushables aren't obsolete. They will be reconstructed during WAL
    1689           2 :                 // replay.
    1690           2 :                 mem.readerUnrefLocked(false)
    1691           2 :         }
    1692             :         // If there's an unused, recycled memtable, we need to release its memory.
    1693           2 :         if obsoleteMemTable := d.memTableRecycle.Swap(nil); obsoleteMemTable != nil {
    1694           2 :                 d.freeMemTable(obsoleteMemTable)
    1695           2 :         }
    1696           2 :         if reserved := d.memTableReserved.Load(); reserved != 0 {
    1697           1 :                 err = firstError(err, errors.Errorf("leaked memtable reservation: %d", errors.Safe(reserved)))
    1698           1 :         }
    1699             : 
    1700             :         // Since we called d.readState.val.unrefLocked() above, we are expected to
    1701             :         // manually schedule deletion of obsolete files.
    1702           2 :         if len(d.mu.versions.obsoleteTables) > 0 {
    1703           2 :                 d.deleteObsoleteFiles(d.newJobIDLocked())
    1704           2 :         }
    1705             : 
    1706           2 :         d.mu.Unlock()
    1707           2 :         d.compactionSchedulers.Wait()
    1708           2 : 
    1709           2 :         // Wait for all cleaning jobs to finish.
    1710           2 :         d.cleanupManager.Close()
    1711           2 : 
    1712           2 :         // Sanity check metrics.
    1713           2 :         if invariants.Enabled {
    1714           2 :                 m := d.Metrics()
    1715           2 :                 if m.Compact.NumInProgress > 0 || m.Compact.InProgressBytes > 0 {
    1716           0 :                         d.mu.Lock()
    1717           0 :                         panic(fmt.Sprintf("invalid metrics on close:\n%s", m))
    1718             :                 }
    1719             :         }
    1720             : 
    1721           2 :         d.mu.Lock()
    1722           2 : 
    1723           2 :         // As a sanity check, ensure that there are no zombie tables. A non-zero count
    1724           2 :         // hints at a reference count leak.
    1725           2 :         if ztbls := len(d.mu.versions.zombieTables); ztbls > 0 {
    1726           0 :                 err = firstError(err, errors.Errorf("non-zero zombie file count: %d", ztbls))
    1727           0 :         }
    1728             : 
    1729           2 :         err = firstError(err, d.objProvider.Close())
    1730           2 : 
    1731           2 :         // If the options include a closer to 'close' the filesystem, close it.
    1732           2 :         if d.opts.private.fsCloser != nil {
    1733           2 :                 d.opts.private.fsCloser.Close()
    1734           2 :         }
    1735             : 
    1736             :         // Return an error if the user failed to close all open snapshots.
    1737           2 :         if v := d.mu.snapshots.count(); v > 0 {
    1738           0 :                 err = firstError(err, errors.Errorf("leaked snapshots: %d open snapshots on DB %p", v, d))
    1739           0 :         }
    1740             : 
    1741           2 :         return err
    1742             : }
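                     :
                     : // Editorial example (not part of the original db.go): Close reports leaked
                     : // resources (iterators, snapshots, memtable reservations) as errors, so callers
                     : // should release them first. The exampleShutdown helper below is hypothetical.
                     : func exampleShutdown(d *DB, iters []*Iterator, snaps []*Snapshot) error {
                     :         for _, it := range iters {
                     :                 if err := it.Close(); err != nil {
                     :                         return err
                     :                 }
                     :         }
                     :         for _, s := range snaps {
                     :                 if err := s.Close(); err != nil {
                     :                         return err
                     :                 }
                     :         }
                     :         // Close panics if the DB is already closed, and returns an error if
                     :         // anything leaked (see the leak checks above).
                     :         return d.Close()
                     : }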
    1743             : 
    1744             : // Compact the specified range of keys in the database.
    1745           2 : func (d *DB) Compact(start, end []byte, parallelize bool) error {
    1746           2 :         if err := d.closed.Load(); err != nil {
    1747           1 :                 panic(err)
    1748             :         }
    1749           2 :         if d.opts.ReadOnly {
    1750           1 :                 return ErrReadOnly
    1751           1 :         }
    1752           2 :         if d.cmp(start, end) >= 0 {
    1753           2 :                 return errors.Errorf("Compact start %s is not less than end %s",
    1754           2 :                         d.opts.Comparer.FormatKey(start), d.opts.Comparer.FormatKey(end))
    1755           2 :         }
    1756             : 
    1757           2 :         d.mu.Lock()
    1758           2 :         maxLevelWithFiles := 1
    1759           2 :         cur := d.mu.versions.currentVersion()
    1760           2 :         for level := 0; level < numLevels; level++ {
    1761           2 :                 overlaps := cur.Overlaps(level, base.UserKeyBoundsInclusive(start, end))
    1762           2 :                 if !overlaps.Empty() {
    1763           2 :                         maxLevelWithFiles = level + 1
    1764           2 :                 }
    1765             :         }
    1766             : 
    1767             :         // Determine if any memtable overlaps with the compaction range. We wait for
    1768             :         // any such overlap to flush (initiating a flush if necessary).
    1769           2 :         mem, err := func() (*flushableEntry, error) {
    1770           2 :                 // Check to see if any files overlap with any of the memtables. The queue
    1771           2 :                 // is ordered from oldest to newest with the mutable memtable being the
    1772           2 :                 // last element in the slice. We want to wait for the newest table that
    1773           2 :                 // overlaps.
    1774           2 :                 for i := len(d.mu.mem.queue) - 1; i >= 0; i-- {
    1775           2 :                         mem := d.mu.mem.queue[i]
    1776           2 :                         var anyOverlaps bool
    1777           2 :                         mem.computePossibleOverlaps(func(b bounded) shouldContinue {
    1778           2 :                                 anyOverlaps = true
    1779           2 :                                 return stopIteration
    1780           2 :                         }, KeyRange{Start: start, End: end})
    1781           2 :                         if !anyOverlaps {
    1782           2 :                                 continue
    1783             :                         }
    1784           2 :                         var err error
    1785           2 :                         if mem.flushable == d.mu.mem.mutable {
    1786           2 :                                 // We have to hold both commitPipeline.mu and DB.mu when calling
    1787           2 :                                 // makeRoomForWrite(). Lock order requirements elsewhere force us to
    1788           2 :                                 // unlock DB.mu in order to grab commitPipeline.mu first.
    1789           2 :                                 d.mu.Unlock()
    1790           2 :                                 d.commit.mu.Lock()
    1791           2 :                                 d.mu.Lock()
    1792           2 :                                 defer d.commit.mu.Unlock()
    1793           2 :                                 if mem.flushable == d.mu.mem.mutable {
    1794           2 :                                         // Only flush if the active memtable is unchanged.
    1795           2 :                                         err = d.makeRoomForWrite(nil)
    1796           2 :                                 }
    1797             :                         }
    1798           2 :                         mem.flushForced = true
    1799           2 :                         d.maybeScheduleFlush()
    1800           2 :                         return mem, err
    1801             :                 }
    1802           2 :                 return nil, nil
    1803             :         }()
    1804             : 
    1805           2 :         d.mu.Unlock()
    1806           2 : 
    1807           2 :         if err != nil {
    1808           0 :                 return err
    1809           0 :         }
    1810           2 :         if mem != nil {
    1811           2 :                 <-mem.flushed
    1812           2 :         }
    1813             : 
    1814           2 :         for level := 0; level < maxLevelWithFiles; {
    1815           2 :                 for {
    1816           2 :                         if err := d.manualCompact(
    1817           2 :                                 start, end, level, parallelize); err != nil {
    1818           1 :                                 if errors.Is(err, ErrCancelledCompaction) {
    1819           1 :                                         continue
    1820             :                                 }
    1821           1 :                                 return err
    1822             :                         }
    1823           2 :                         break
    1824             :                 }
    1825           2 :                 level++
    1826           2 :                 if level == numLevels-1 {
    1827           2 :                         // A manual compaction of the bottommost level occurred.
    1828           2 :                         // There is no next level to try and compact.
    1829           2 :                         break
    1830             :                 }
    1831             :         }
    1832           2 :         return nil
    1833             : }
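                     :
                     : // Editorial example (not part of the original db.go): forcing a manual
                     : // compaction of a key range, e.g. after deleting a large prefix. Compact
                     : // blocks until every level that overlaps the range has been compacted; the
                     : // exampleManualCompact helper and key range below are illustrative only.
                     : func exampleManualCompact(d *DB) error {
                     :         return d.Compact([]byte("user/"), []byte("user0"), true /* parallelize */)
                     : }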
    1834             : 
    1835           2 : func (d *DB) manualCompact(start, end []byte, level int, parallelize bool) error {
    1836           2 :         d.mu.Lock()
    1837           2 :         curr := d.mu.versions.currentVersion()
    1838           2 :         files := curr.Overlaps(level, base.UserKeyBoundsInclusive(start, end))
    1839           2 :         if files.Empty() {
    1840           2 :                 d.mu.Unlock()
    1841           2 :                 return nil
    1842           2 :         }
    1843             : 
    1844           2 :         var compactions []*manualCompaction
    1845           2 :         if parallelize {
    1846           2 :                 compactions = append(compactions, d.splitManualCompaction(start, end, level)...)
    1847           2 :         } else {
    1848           2 :                 compactions = append(compactions, &manualCompaction{
    1849           2 :                         level: level,
    1850           2 :                         done:  make(chan error, 1),
    1851           2 :                         start: start,
    1852           2 :                         end:   end,
    1853           2 :                 })
    1854           2 :         }
    1855           2 :         d.mu.compact.manual = append(d.mu.compact.manual, compactions...)
    1856           2 :         d.maybeScheduleCompaction()
    1857           2 :         d.mu.Unlock()
    1858           2 : 
    1859           2 :         // Each of the channels is guaranteed to be eventually sent to once. After a
    1860           2 :         // compaction is possibly picked in d.maybeScheduleCompaction(), either the
    1861           2 :         // compaction is dropped, executed after being scheduled, or retried later.
    1862           2 :         // Assuming eventual progress when a compaction is retried, all outcomes send
    1863           2 :         // a value to the done channel. Since the channels are buffered, it is not
    1864           2 :         // necessary to read from each channel, and so we can exit early in the event
    1865           2 :         // of an error.
    1866           2 :         for _, compaction := range compactions {
    1867           2 :                 if err := <-compaction.done; err != nil {
    1868           1 :                         return err
    1869           1 :                 }
    1870             :         }
    1871           2 :         return nil
    1872             : }
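                     :
                     : // Editorial sketch (not part of the original db.go): the buffered-done-channel
                     : // pattern relied on above, in isolation. Because each done channel has capacity
                     : // 1, a sender never blocks, so the receiver may return at the first error
                     : // without draining the remaining channels. exampleWaitFirstError is hypothetical.
                     : func exampleWaitFirstError(done []chan error) error {
                     :         for _, ch := range done {
                     :                 if err := <-ch; err != nil {
                     :                         // Later sends still complete because the channels are buffered.
                     :                         return err
                     :                 }
                     :         }
                     :         return nil
                     : }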
    1873             : 
    1874             : // splitManualCompaction splits a manual compaction over [start,end] on level
    1875             : // such that the resulting compactions have no key overlap.
    1876             : func (d *DB) splitManualCompaction(
    1877             :         start, end []byte, level int,
    1878           2 : ) (splitCompactions []*manualCompaction) {
    1879           2 :         curr := d.mu.versions.currentVersion()
    1880           2 :         endLevel := level + 1
    1881           2 :         baseLevel := d.mu.versions.picker.getBaseLevel()
    1882           2 :         if level == 0 {
    1883           2 :                 endLevel = baseLevel
    1884           2 :         }
    1885           2 :         keyRanges := curr.CalculateInuseKeyRanges(level, endLevel, start, end)
    1886           2 :         for _, keyRange := range keyRanges {
    1887           2 :                 splitCompactions = append(splitCompactions, &manualCompaction{
    1888           2 :                         level: level,
    1889           2 :                         done:  make(chan error, 1),
    1890           2 :                         start: keyRange.Start,
    1891           2 :                         end:   keyRange.End.Key,
    1892           2 :                         split: true,
    1893           2 :                 })
    1894           2 :         }
    1895           2 :         return splitCompactions
    1896             : }
    1897             : 
    1898             : // Flush the memtable to stable storage.
    1899           2 : func (d *DB) Flush() error {
    1900           2 :         flushDone, err := d.AsyncFlush()
    1901           2 :         if err != nil {
    1902           1 :                 return err
    1903           1 :         }
    1904           2 :         <-flushDone
    1905           2 :         return nil
    1906             : }
    1907             : 
    1908             : // AsyncFlush asynchronously flushes the memtable to stable storage.
    1909             : //
    1910             : // If no error is returned, the caller can receive from the returned channel in
    1911             : // order to wait for the flush to complete.
    1912           2 : func (d *DB) AsyncFlush() (<-chan struct{}, error) {
    1913           2 :         if err := d.closed.Load(); err != nil {
    1914           1 :                 panic(err)
    1915             :         }
    1916           2 :         if d.opts.ReadOnly {
    1917           1 :                 return nil, ErrReadOnly
    1918           1 :         }
    1919             : 
    1920           2 :         d.commit.mu.Lock()
    1921           2 :         defer d.commit.mu.Unlock()
    1922           2 :         d.mu.Lock()
    1923           2 :         defer d.mu.Unlock()
    1924           2 :         flushed := d.mu.mem.queue[len(d.mu.mem.queue)-1].flushed
    1925           2 :         err := d.makeRoomForWrite(nil)
    1926           2 :         if err != nil {
    1927           0 :                 return nil, err
    1928           0 :         }
    1929           2 :         return flushed, nil
    1930             : }
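                     :
                     : // Editorial example (not part of the original db.go): overlapping other work
                     : // with an asynchronous flush, versus the blocking Flush above. The
                     : // exampleAsyncFlush helper is hypothetical.
                     : func exampleAsyncFlush(d *DB) error {
                     :         flushed, err := d.AsyncFlush()
                     :         if err != nil {
                     :                 return err
                     :         }
                     :         // ... perform other work while the flush runs in the background ...
                     :         <-flushed // the memtable that was mutable at the call has been flushed
                     :         return nil
                     : }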
    1931             : 
    1932             : // Metrics returns metrics about the database.
    1933           2 : func (d *DB) Metrics() *Metrics {
    1934           2 :         metrics := &Metrics{}
    1935           2 :         walStats := d.mu.log.manager.Stats()
    1936           2 : 
    1937           2 :         d.mu.Lock()
    1938           2 :         vers := d.mu.versions.currentVersion()
    1939           2 :         *metrics = d.mu.versions.metrics
    1940           2 :         metrics.Compact.EstimatedDebt = d.mu.versions.picker.estimatedCompactionDebt(0)
    1941           2 :         metrics.Compact.InProgressBytes = d.mu.versions.atomicInProgressBytes.Load()
    1942           2 :         // TODO(radu): split this to separate the download compactions.
    1943           2 :         metrics.Compact.NumInProgress = int64(d.mu.compact.compactingCount + d.mu.compact.downloadingCount)
    1944           2 :         metrics.Compact.MarkedFiles = vers.Stats.MarkedForCompaction
    1945           2 :         metrics.Compact.Duration = d.mu.compact.duration
    1946           2 :         for c := range d.mu.compact.inProgress {
    1947           1 :                 if c.kind != compactionKindFlush {
    1948           1 :                         metrics.Compact.Duration += d.timeNow().Sub(c.beganAt)
    1949           1 :                 }
    1950             :         }
    1951             : 
    1952           2 :         for _, m := range d.mu.mem.queue {
    1953           2 :                 metrics.MemTable.Size += m.totalBytes()
    1954           2 :         }
    1955           2 :         metrics.Snapshots.Count = d.mu.snapshots.count()
    1956           2 :         if metrics.Snapshots.Count > 0 {
    1957           0 :                 metrics.Snapshots.EarliestSeqNum = d.mu.snapshots.earliest()
    1958           0 :         }
    1959           2 :         metrics.Snapshots.PinnedKeys = d.mu.snapshots.cumulativePinnedCount
    1960           2 :         metrics.Snapshots.PinnedSize = d.mu.snapshots.cumulativePinnedSize
    1961           2 :         metrics.MemTable.Count = int64(len(d.mu.mem.queue))
    1962           2 :         metrics.MemTable.ZombieCount = d.memTableCount.Load() - metrics.MemTable.Count
    1963           2 :         metrics.MemTable.ZombieSize = uint64(d.memTableReserved.Load()) - metrics.MemTable.Size
    1964           2 :         metrics.WAL.ObsoleteFiles = int64(walStats.ObsoleteFileCount)
    1965           2 :         metrics.WAL.ObsoletePhysicalSize = walStats.ObsoleteFileSize
    1966           2 :         metrics.WAL.Files = int64(walStats.LiveFileCount)
    1967           2 :         // The current WAL's size (d.logSize) is the logical size, which may be less
    1968           2 :         // than the WAL's physical size if it was recycled. walStats.LiveFileSize
    1969           2 :         // includes the physical size of all live WALs, but for the current WAL it
    1970           2 :         // reflects the physical size when it was opened. So it is possible that
    1971           2 :         // d.logSize has exceeded that physical size. We allow for this
    1972           2 :         // anomaly.
    1973           2 :         metrics.WAL.PhysicalSize = walStats.LiveFileSize
    1974           2 :         metrics.WAL.BytesIn = d.logBytesIn.Load()
    1975           2 :         metrics.WAL.Size = d.logSize.Load()
    1976           2 :         for i, n := 0, len(d.mu.mem.queue)-1; i < n; i++ {
    1977           2 :                 metrics.WAL.Size += d.mu.mem.queue[i].logSize
    1978           2 :         }
    1979           2 :         metrics.WAL.BytesWritten = metrics.Levels[0].BytesIn + metrics.WAL.Size
    1980           2 :         metrics.WAL.Failover = walStats.Failover
    1981           2 : 
    1982           2 :         if p := d.mu.versions.picker; p != nil {
    1983           2 :                 compactions := d.getInProgressCompactionInfoLocked(nil)
    1984           2 :                 for level, score := range p.getScores(compactions) {
    1985           2 :                         metrics.Levels[level].Score = score
    1986           2 :                 }
    1987             :         }
    1988           2 :         metrics.Table.ZombieCount = int64(len(d.mu.versions.zombieTables))
    1989           2 :         for _, info := range d.mu.versions.zombieTables {
    1990           1 :                 metrics.Table.ZombieSize += info.FileSize
    1991           1 :                 if info.isLocal {
    1992           1 :                         metrics.Table.Local.ZombieSize += info.FileSize
    1993           1 :                 }
    1994             :         }
    1995           2 :         metrics.private.optionsFileSize = d.optionsFileSize
    1996           2 : 
    1997           2 :         // TODO(jackson): Consider making these metrics optional.
    1998           2 :         metrics.Keys.RangeKeySetsCount = countRangeKeySetFragments(vers)
    1999           2 :         metrics.Keys.TombstoneCount = countTombstones(vers)
    2000           2 : 
    2001           2 :         d.mu.versions.logLock()
    2002           2 :         metrics.private.manifestFileSize = uint64(d.mu.versions.manifest.Size())
    2003           2 :         backingCount, backingTotalSize := d.mu.versions.virtualBackings.Stats()
    2004           2 :         metrics.Table.BackingTableCount = uint64(backingCount)
    2005           2 :         metrics.Table.BackingTableSize = backingTotalSize
    2006           2 :         d.mu.versions.logUnlock()
    2007           2 : 
    2008           2 :         metrics.LogWriter.FsyncLatency = d.mu.log.metrics.fsyncLatency
    2009           2 :         if err := metrics.LogWriter.Merge(&d.mu.log.metrics.LogWriterMetrics); err != nil {
    2010           0 :                 d.opts.Logger.Errorf("metrics error: %s", err)
    2011           0 :         }
    2012           2 :         metrics.Flush.WriteThroughput = d.mu.compact.flushWriteThroughput
    2013           2 :         if d.mu.compact.flushing {
    2014           1 :                 metrics.Flush.NumInProgress = 1
    2015           1 :         }
    2016           2 :         for i := 0; i < numLevels; i++ {
    2017           2 :                 metrics.Levels[i].Additional.ValueBlocksSize = valueBlocksSizeForLevel(vers, i)
    2018           2 :                 unknown, snappy, none, zstd := compressionTypesForLevel(vers, i)
    2019           2 :                 metrics.Table.CompressedCountUnknown += int64(unknown)
    2020           2 :                 metrics.Table.CompressedCountSnappy += int64(snappy)
    2021           2 :                 metrics.Table.CompressedCountZstd += int64(zstd)
    2022           2 :                 metrics.Table.CompressedCountNone += int64(none)
    2023           2 :         }
    2024             : 
    2025           2 :         d.mu.Unlock()
    2026           2 : 
    2027           2 :         metrics.BlockCache = d.opts.Cache.Metrics()
    2028           2 :         metrics.TableCache, metrics.Filter = d.tableCache.metrics()
    2029           2 :         metrics.TableIters = int64(d.tableCache.iterCount())
    2030           2 :         metrics.CategoryStats = d.tableCache.dbOpts.sstStatsCollector.GetStats()
    2031           2 : 
    2032           2 :         metrics.SecondaryCacheMetrics = d.objProvider.Metrics()
    2033           2 : 
    2034           2 :         metrics.Uptime = d.timeNow().Sub(d.openedAt)
    2035           2 : 
    2036           2 :         return metrics
    2037             : }
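                     :
                     : // Editorial example (not part of the original db.go): sampling a few of the
                     : // returned metrics. Metrics builds a fresh snapshot on each call. The
                     : // exampleLogMetrics helper is hypothetical.
                     : func exampleLogMetrics(d *DB) {
                     :         m := d.Metrics()
                     :         d.opts.Logger.Infof("compaction debt=%d in-progress=%d wal-size=%d",
                     :                 m.Compact.EstimatedDebt, m.Compact.NumInProgress, m.WAL.Size)
                     : }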
    2038             : 
    2039             : // sstablesOptions holds the optional parameters used to retrieve TableInfo for all sstables.
    2040             : type sstablesOptions struct {
    2041             :         // If set to true, the sstable properties are returned in each TableInfo.
    2042             :         withProperties bool
    2043             : 
    2044             :         // If set, only sstables that overlap the key range (end-exclusive) are returned.
    2045             :         start []byte
    2046             :         end   []byte
    2047             : 
    2048             :         withApproximateSpanBytes bool
    2049             : }
    2050             : 
    2051             : // SSTablesOption sets an optional parameter used by `DB.SSTables`.
    2052             : type SSTablesOption func(*sstablesOptions)
    2053             : 
    2054             : // WithProperties enables returning the sstable properties in each TableInfo.
    2055             : //
    2056             : // NOTE: if most of the sstable properties need to be read from disk,
    2057             : // this option may make the `SSTables` method quite slow.
    2058           1 : func WithProperties() SSTablesOption {
    2059           1 :         return func(opt *sstablesOptions) {
    2060           1 :                 opt.withProperties = true
    2061           1 :         }
    2062             : }
    2063             : 
    2064             : // WithKeyRangeFilter ensures that the returned sstables overlap start and end (end-exclusive).
    2065             : // If start and end are both nil, the filter has no effect.
    2066           1 : func WithKeyRangeFilter(start, end []byte) SSTablesOption {
    2067           1 :         return func(opt *sstablesOptions) {
    2068           1 :                 opt.end = end
    2069           1 :                 opt.start = start
    2070           1 :         }
    2071             : }
    2072             : 
    2073             : // WithApproximateSpanBytes enables capturing the approximate number of bytes that
    2074             : // overlap the provided key span for each sstable.
    2075             : // NOTE: this option can only be used when WithKeyRangeFilter and WithProperties
    2076             : // are also provided.
    2077           1 : func WithApproximateSpanBytes() SSTablesOption {
    2078           1 :         return func(opt *sstablesOptions) {
    2079           1 :                 opt.withApproximateSpanBytes = true
    2080           1 :         }
    2081             : }
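                     :
                     : // Editorial example (not part of the original db.go): combining the options
                     : // above. WithApproximateSpanBytes requires that WithKeyRangeFilter and
                     : // WithProperties are also passed, as enforced by SSTables below. The
                     : // exampleSSTablesInRange helper is hypothetical.
                     : func exampleSSTablesInRange(d *DB, start, end []byte) ([][]SSTableInfo, error) {
                     :         return d.SSTables(
                     :                 WithKeyRangeFilter(start, end),
                     :                 WithProperties(),
                     :                 WithApproximateSpanBytes(),
                     :         )
                     : }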
    2082             : 
    2083             : // BackingType denotes the type of storage backing a given sstable.
    2084             : type BackingType int
    2085             : 
    2086             : const (
    2087             :         // BackingTypeLocal denotes an sstable stored on local disk according to the
    2088             :         // objprovider. This file is completely owned by us.
    2089             :         BackingTypeLocal BackingType = iota
    2090             :         // BackingTypeShared denotes an sstable stored on shared storage, created
    2091             :         // by this Pebble instance and possibly shared by other Pebble instances.
    2092             :         // These types of files have lifecycle managed by Pebble.
    2093             :         BackingTypeShared
    2094             :         // BackingTypeSharedForeign denotes an sstable stored on shared storage,
    2095             :         // created by a Pebble instance other than this one. These types of files have
    2096             :         // lifecycle managed by Pebble.
    2097             :         BackingTypeSharedForeign
    2098             :         // BackingTypeExternal denotes an sstable stored on external storage,
    2099             :         // not owned by any Pebble instance and with no refcounting/cleanup methods
    2100             :         // or lifecycle management. An example of an external file is a file restored
    2101             :         // from a backup.
    2102             :         BackingTypeExternal
    2103             : )
    2104             : 
    2105             : // SSTableInfo exports manifest.TableInfo with sstable.Properties alongside
    2106             : // other file backing info.
    2107             : type SSTableInfo struct {
    2108             :         manifest.TableInfo
    2109             :         // Virtual indicates whether the sstable is virtual.
    2110             :         Virtual bool
    2111             :         // BackingSSTNum is the disk file number associated with the backing sstable.
    2112             :         // If Virtual is false, BackingSSTNum == PhysicalTableDiskFileNum(FileNum).
    2113             :         BackingSSTNum base.DiskFileNum
    2114             :         // BackingType is the type of storage backing this sstable.
    2115             :         BackingType BackingType
    2116             :         // Locator is the remote.Locator backing this sstable, if the backing type is
    2117             :         // not BackingTypeLocal.
    2118             :         Locator remote.Locator
    2119             : 
    2120             :         // Properties is the sstable properties of this table. If Virtual is true,
    2121             :         // then the Properties are associated with the backing sst.
    2122             :         Properties *sstable.Properties
    2123             : }
    2124             : 
    2125             : // SSTables retrieves the current sstables. The returned slice is indexed by
    2126             : // level and each level is indexed by the position of the sstable within the
    2127             : // level. Note that this information may be out of date due to concurrent
    2128             : // flushes and compactions.
    2129           1 : func (d *DB) SSTables(opts ...SSTablesOption) ([][]SSTableInfo, error) {
    2130           1 :         opt := &sstablesOptions{}
    2131           1 :         for _, fn := range opts {
    2132           1 :                 fn(opt)
    2133           1 :         }
    2134             : 
    2135           1 :         if opt.withApproximateSpanBytes && !opt.withProperties {
    2136           1 :                 return nil, errors.Errorf("Cannot use WithApproximateSpanBytes without WithProperties option.")
    2137           1 :         }
    2138           1 :         if opt.withApproximateSpanBytes && (opt.start == nil || opt.end == nil) {
    2139           1 :                 return nil, errors.Errorf("Cannot use WithApproximateSpanBytes without WithKeyRangeFilter option.")
    2140           1 :         }
    2141             : 
    2142             :         // Grab and reference the current readState.
    2143           1 :         readState := d.loadReadState()
    2144           1 :         defer readState.unref()
    2145           1 : 
    2146           1 :         // TODO(peter): This is somewhat expensive, especially on a large
    2147           1 :         // database. It might be worthwhile to unify TableInfo and FileMetadata and
    2148           1 :         // then we could simply return current.Files. Note that RocksDB is doing
    2149           1 :         // something similar to the current code, so perhaps it isn't too bad.
    2150           1 :         srcLevels := readState.current.Levels
    2151           1 :         var totalTables int
    2152           1 :         for i := range srcLevels {
    2153           1 :                 totalTables += srcLevels[i].Len()
    2154           1 :         }
    2155             : 
    2156           1 :         destTables := make([]SSTableInfo, totalTables)
    2157           1 :         destLevels := make([][]SSTableInfo, len(srcLevels))
    2158           1 :         for i := range destLevels {
    2159           1 :                 iter := srcLevels[i].Iter()
    2160           1 :                 j := 0
    2161           1 :                 for m := iter.First(); m != nil; m = iter.Next() {
    2162           1 :                         if opt.start != nil && opt.end != nil {
    2163           1 :                                 b := base.UserKeyBoundsEndExclusive(opt.start, opt.end)
    2164           1 :                                 if !m.Overlaps(d.opts.Comparer.Compare, &b) {
    2165           1 :                                         continue
    2166             :                                 }
    2167             :                         }
    2168           1 :                         destTables[j] = SSTableInfo{TableInfo: m.TableInfo()}
    2169           1 :                         if opt.withProperties {
    2170           1 :                                 p, err := d.tableCache.getTableProperties(
    2171           1 :                                         m,
    2172           1 :                                 )
    2173           1 :                                 if err != nil {
    2174           0 :                                         return nil, err
    2175           0 :                                 }
    2176           1 :                                 destTables[j].Properties = p
    2177             :                         }
    2178           1 :                         destTables[j].Virtual = m.Virtual
    2179           1 :                         destTables[j].BackingSSTNum = m.FileBacking.DiskFileNum
    2180           1 :                         objMeta, err := d.objProvider.Lookup(fileTypeTable, m.FileBacking.DiskFileNum)
    2181           1 :                         if err != nil {
    2182           0 :                                 return nil, err
    2183           0 :                         }
    2184           1 :                         if objMeta.IsRemote() {
    2185           0 :                                 if objMeta.IsShared() {
    2186           0 :                                         if d.objProvider.IsSharedForeign(objMeta) {
    2187           0 :                                                 destTables[j].BackingType = BackingTypeSharedForeign
    2188           0 :                                         } else {
    2189           0 :                                                 destTables[j].BackingType = BackingTypeShared
    2190           0 :                                         }
    2191           0 :                                 } else {
    2192           0 :                                         destTables[j].BackingType = BackingTypeExternal
    2193           0 :                                 }
    2194           0 :                                 destTables[j].Locator = objMeta.Remote.Locator
    2195           1 :                         } else {
    2196           1 :                                 destTables[j].BackingType = BackingTypeLocal
    2197           1 :                         }
    2198             : 
    2199           1 :                         if opt.withApproximateSpanBytes {
    2200           1 :                                 var spanBytes uint64
    2201           1 :                                 if m.ContainedWithinSpan(d.opts.Comparer.Compare, opt.start, opt.end) {
    2202           0 :                                         spanBytes = m.Size
    2203           1 :                                 } else {
    2204           1 :                                         size, err := d.tableCache.estimateSize(m, opt.start, opt.end)
    2205           1 :                                         if err != nil {
    2206           0 :                                                 return nil, err
    2207           0 :                                         }
    2208           1 :                                         spanBytes = size
    2209             :                                 }
    2210           1 :                                 propertiesCopy := *destTables[j].Properties
    2211           1 : 
    2212           1 :                                 // Deep copy user properties so approximate span bytes can be added.
    2213           1 :                                 propertiesCopy.UserProperties = make(map[string]string, len(destTables[j].Properties.UserProperties)+1)
    2214           1 :                                 for k, v := range destTables[j].Properties.UserProperties {
    2215           0 :                                         propertiesCopy.UserProperties[k] = v
    2216           0 :                                 }
    2217           1 :                                 propertiesCopy.UserProperties["approximate-span-bytes"] = strconv.FormatUint(spanBytes, 10)
    2218           1 :                                 destTables[j].Properties = &propertiesCopy
    2219             :                         }
    2220           1 :                         j++
    2221             :                 }
    2222           1 :                 destLevels[i] = destTables[:j]
    2223           1 :                 destTables = destTables[j:]
    2224             :         }
    2225             : 
    2226           1 :         return destLevels, nil
    2227             : }
    2228             : 
    2229             : // EstimateDiskUsage returns the estimated filesystem space used in bytes for
    2230             : // storing the range `[start, end]`. The estimation is computed as follows:
    2231             : //
    2232             : //   - For sstables fully contained in the range the whole file size is included.
    2233             : //   - For sstables partially contained in the range the overlapping data block sizes
    2234             : //     are included. Even if a data block partially overlaps, or we cannot determine
    2235             : //     overlap due to abbreviated index keys, the full data block size is included in
    2236             : //     the estimation. Note that unlike fully contained sstables, none of the
    2237             : //     meta-block space is counted for partially overlapped files.
    2238             : //   - For virtual sstables, we use the overlap between start, end and the virtual
    2239             : //     sstable bounds to determine disk usage.
    2240             : //   - There may also exist WAL entries for unflushed keys in this range. This
    2241             : //     estimation currently excludes space used for the range in the WAL.
    2242           1 : func (d *DB) EstimateDiskUsage(start, end []byte) (uint64, error) {
    2243           1 :         bytes, _, _, err := d.EstimateDiskUsageByBackingType(start, end)
    2244           1 :         return bytes, err
    2245           1 : }
    2246             : 
    2247             : // EstimateDiskUsageByBackingType is like EstimateDiskUsage but additionally
    2248             : // returns the subsets of that size in remote and external files.
    2249             : func (d *DB) EstimateDiskUsageByBackingType(
    2250             :         start, end []byte,
    2251           1 : ) (totalSize, remoteSize, externalSize uint64, _ error) {
    2252           1 :         if err := d.closed.Load(); err != nil {
    2253           0 :                 panic(err)
    2254             :         }
    2255           1 :         if d.opts.Comparer.Compare(start, end) > 0 {
    2256           0 :                 return 0, 0, 0, errors.New("invalid key-range specified (start > end)")
    2257           0 :         }
    2258             : 
    2259             :         // Grab and reference the current readState. This prevents the underlying
    2260             :         // files in the associated version from being deleted if there is a concurrent
    2261             :         // compaction.
    2262           1 :         readState := d.loadReadState()
    2263           1 :         defer readState.unref()
    2264           1 : 
    2265           1 :         for level, files := range readState.current.Levels {
    2266           1 :                 iter := files.Iter()
    2267           1 :                 if level > 0 {
    2268           1 :                         // We can only use `Overlaps` to restrict `files` at L1+ since at L0 it
    2269           1 :                         // expands the range iteratively until it has found a set of files that
    2270           1 :                         // do not overlap any other L0 files outside that set.
    2271           1 :                         overlaps := readState.current.Overlaps(level, base.UserKeyBoundsInclusive(start, end))
    2272           1 :                         iter = overlaps.Iter()
    2273           1 :                 }
    2274           1 :                 for file := iter.First(); file != nil; file = iter.Next() {
    2275           1 :                         if d.opts.Comparer.Compare(start, file.Smallest.UserKey) <= 0 &&
    2276           1 :                                 d.opts.Comparer.Compare(file.Largest.UserKey, end) <= 0 {
    2277           1 :                                 // The range fully contains the file, so skip looking it up in
    2278           1 :                                 // table cache/looking at its indexes, and add the full file size.
    2279           1 :                                 meta, err := d.objProvider.Lookup(fileTypeTable, file.FileBacking.DiskFileNum)
    2280           1 :                                 if err != nil {
    2281           0 :                                         return 0, 0, 0, err
    2282           0 :                                 }
    2283           1 :                                 if meta.IsRemote() {
    2284           0 :                                         remoteSize += file.Size
    2285           0 :                                         if meta.Remote.CleanupMethod == objstorage.SharedNoCleanup {
    2286           0 :                                                 externalSize += file.Size
    2287           0 :                                         }
    2288             :                                 }
    2289           1 :                                 totalSize += file.Size
    2290           1 :                         } else if d.opts.Comparer.Compare(file.Smallest.UserKey, end) <= 0 &&
    2291           1 :                                 d.opts.Comparer.Compare(start, file.Largest.UserKey) <= 0 {
    2292           1 :                                 var size uint64
    2293           1 :                                 var err error
    2294           1 :                                 if file.Virtual {
    2295           0 :                                         err = d.tableCache.withVirtualReader(
    2296           0 :                                                 file.VirtualMeta(),
    2297           0 :                                                 func(r sstable.VirtualReader) (err error) {
    2298           0 :                                                         size, err = r.EstimateDiskUsage(start, end)
    2299           0 :                                                         return err
    2300           0 :                                                 },
    2301             :                                         )
    2302           1 :                                 } else {
    2303           1 :                                         err = d.tableCache.withReader(
    2304           1 :                                                 file.PhysicalMeta(),
    2305           1 :                                                 func(r *sstable.Reader) (err error) {
    2306           1 :                                                         size, err = r.EstimateDiskUsage(start, end)
    2307           1 :                                                         return err
    2308           1 :                                                 },
    2309             :                                         )
    2310             :                                 }
    2311           1 :                                 if err != nil {
    2312           0 :                                         return 0, 0, 0, err
    2313           0 :                                 }
    2314           1 :                                 meta, err := d.objProvider.Lookup(fileTypeTable, file.FileBacking.DiskFileNum)
    2315           1 :                                 if err != nil {
    2316           0 :                                         return 0, 0, 0, err
    2317           0 :                                 }
    2318           1 :                                 if meta.IsRemote() {
    2319           0 :                                         remoteSize += size
    2320           0 :                                         if meta.Remote.CleanupMethod == objstorage.SharedNoCleanup {
    2321           0 :                                                 externalSize += size
    2322           0 :                                         }
    2323             :                                 }
    2324           1 :                                 totalSize += size
    2325             :                         }
    2326             :                 }
    2327             :         }
    2328           1 :         return totalSize, remoteSize, externalSize, nil
    2329             : }
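                     :
                     : // Editorial example (not part of the original db.go): estimating how much of a
                     : // key range's on-disk footprint lives on remote or external storage. The
                     : // exampleRangeUsage helper is hypothetical.
                     : func exampleRangeUsage(d *DB, start, end []byte) error {
                     :         total, remote, external, err := d.EstimateDiskUsageByBackingType(start, end)
                     :         if err != nil {
                     :                 return err
                     :         }
                     :         d.opts.Logger.Infof("range usage: total=%d remote=%d external=%d bytes",
                     :                 total, remote, external)
                     :         return nil
                     : }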
    2330             : 
    2331           2 : func (d *DB) walPreallocateSize() int {
    2332           2 :         // Set the WAL preallocate size to 110% of the memtable size. Note that there
    2333           2 :         // is a bit of apples and oranges in units here as the memtable size
    2334           2 :         // corresponds to the memory usage of the memtable while the WAL size is the
    2335           2 :         // size of the batches (plus overhead) stored in the WAL.
    2336           2 :         //
    2337           2 :         // TODO(peter): 110% of the memtable size is quite hefty for a block
    2338           2 :         // size. This logic is taken from GetWalPreallocateBlockSize in
    2339           2 :         // RocksDB. Could a smaller preallocation block size be used?
    2340           2 :         size := d.opts.MemTableSize
    2341           2 :         size = (size / 10) + size
    2342           2 :         return int(size)
    2343           2 : }
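                     :
                     : // Editorial note (not part of the original db.go): the 110% computation above
                     : // in isolation. For example, a 64 MiB memtable yields 67108864 + 6710886 =
                     : // 73819750 bytes (~70.4 MiB) of WAL preallocation. examplePreallocateSize is
                     : // hypothetical.
                     : func examplePreallocateSize(memTableSize uint64) int {
                     :         return int(memTableSize/10 + memTableSize)
                     : }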
    2344             : 
    2345             : func (d *DB) newMemTable(
    2346             :         logNum base.DiskFileNum, logSeqNum, minSize uint64,
    2347           2 : ) (*memTable, *flushableEntry) {
    2348           2 :         targetSize := minSize + uint64(memTableEmptySize)
    2349           2 :         // The targetSize should be less than MemTableSize, because any batch >=
    2350           2 :         // MemTableSize/2 should be treated as a large flushable batch.
    2351           2 :         if targetSize > d.opts.MemTableSize {
    2352           0 :                 panic(errors.AssertionFailedf("attempting to allocate memtable larger than MemTableSize"))
    2353             :         }
    2354             :         // Double until the next memtable size is at least large enough to fit
    2355             :         // minSize.
    2356           2 :         for d.mu.mem.nextSize < targetSize {
    2357           1 :                 d.mu.mem.nextSize = min(2*d.mu.mem.nextSize, d.opts.MemTableSize)
    2358           1 :         }
    2359           2 :         size := d.mu.mem.nextSize
    2360           2 :         // The next memtable should be double the size, up to Options.MemTableSize.
    2361           2 :         if d.mu.mem.nextSize < d.opts.MemTableSize {
    2362           2 :                 d.mu.mem.nextSize = min(2*d.mu.mem.nextSize, d.opts.MemTableSize)
    2363           2 :         }
    2364             : 
    2365           2 :         memtblOpts := memTableOptions{
    2366           2 :                 Options:   d.opts,
    2367           2 :                 logSeqNum: logSeqNum,
    2368           2 :         }
    2369           2 : 
    2370           2 :         // Before attempting to allocate a new memtable, check if there's one
    2371           2 :         // available for recycling in memTableRecycle. Large contiguous allocations
    2372           2 :         // can be costly as fragmentation makes it more difficult to find a large
    2373           2 :         // contiguous free space. We've observed 64MB allocations taking 10ms+.
    2374           2 :         //
    2375           2 :         // To reduce these costly allocations, up to 1 obsolete memtable is stashed
    2376           2 :         // in `d.memTableRecycle` to allow a future memtable rotation to reuse
    2377           2 :         // existing memory.
    2378           2 :         var mem *memTable
    2379           2 :         mem = d.memTableRecycle.Swap(nil)
    2380           2 :         if mem != nil && uint64(len(mem.arenaBuf)) != size {
    2381           2 :                 d.freeMemTable(mem)
    2382           2 :                 mem = nil
    2383           2 :         }
    2384           2 :         if mem != nil {
    2385           2 :                 // Carry through the existing buffer and memory reservation.
    2386           2 :                 memtblOpts.arenaBuf = mem.arenaBuf
    2387           2 :                 memtblOpts.releaseAccountingReservation = mem.releaseAccountingReservation
    2388           2 :         } else {
    2389           2 :                 mem = new(memTable)
    2390           2 :                 memtblOpts.arenaBuf = manual.New(int(size))
    2391           2 :                 memtblOpts.releaseAccountingReservation = d.opts.Cache.Reserve(int(size))
    2392           2 :                 d.memTableCount.Add(1)
    2393           2 :                 d.memTableReserved.Add(int64(size))
    2394           2 : 
    2395           2 :                 // Note: this is a no-op if invariants are disabled or race is enabled.
    2396           2 :                 invariants.SetFinalizer(mem, checkMemTable)
    2397           2 :         }
    2398           2 :         mem.init(memtblOpts)
    2399           2 : 
    2400           2 :         entry := d.newFlushableEntry(mem, logNum, logSeqNum)
    2401           2 :         entry.releaseMemAccounting = func() {
    2402           2 :                 // If the user leaks iterators, we may be releasing the memtable after
    2403           2 :                 // the DB is already closed. In this case, we want to just release the
    2404           2 :                 // memory because DB.Close won't come along to free it for us.
    2405           2 :                 if err := d.closed.Load(); err != nil {
    2406           2 :                         d.freeMemTable(mem)
    2407           2 :                         return
    2408           2 :                 }
    2409             : 
    2410             :                 // The next memtable allocation might be able to reuse this memtable.
    2411             :                 // Stash it on d.memTableRecycle.
    2412           2 :                 if unusedMem := d.memTableRecycle.Swap(mem); unusedMem != nil {
    2413           2 :                         // There was already a memtable waiting to be recycled. We're now
    2414           2 :                         // responsible for freeing it.
    2415           2 :                         d.freeMemTable(unusedMem)
    2416           2 :                 }
    2417             :         }
    2418           2 :         return mem, entry
    2419             : }
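                     :
                     : // Editorial sketch (not part of the original db.go): the single-slot recycle
                     : // pattern used via d.memTableRecycle above, shown generically. A released
                     : // buffer is stashed with Swap; the next allocation takes it back with
                     : // Swap(nil) and reuses it only if the size still matches. exampleRecycler is
                     : // hypothetical.
                     : type exampleRecycler struct {
                     :         slot atomic.Pointer[[]byte]
                     : }
                     :
                     : func (r *exampleRecycler) get(size int) []byte {
                     :         if old := r.slot.Swap(nil); old != nil && len(*old) == size {
                     :                 return *old
                     :         }
                     :         return make([]byte, size)
                     : }
                     :
                     : func (r *exampleRecycler) put(buf []byte) {
                     :         // If another buffer was already stashed, the previous one is dropped and
                     :         // left for the garbage collector.
                     :         r.slot.Swap(&buf)
                     : }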
    2420             : 
    2421           2 : func (d *DB) freeMemTable(m *memTable) {
    2422           2 :         d.memTableCount.Add(-1)
    2423           2 :         d.memTableReserved.Add(-int64(len(m.arenaBuf)))
    2424           2 :         m.free()
    2425           2 : }
    2426             : 
    2427             : func (d *DB) newFlushableEntry(
    2428             :         f flushable, logNum base.DiskFileNum, logSeqNum uint64,
    2429           2 : ) *flushableEntry {
    2430           2 :         fe := &flushableEntry{
    2431           2 :                 flushable:      f,
    2432           2 :                 flushed:        make(chan struct{}),
    2433           2 :                 logNum:         logNum,
    2434           2 :                 logSeqNum:      logSeqNum,
    2435           2 :                 deleteFn:       d.mu.versions.addObsolete,
    2436           2 :                 deleteFnLocked: d.mu.versions.addObsoleteLocked,
    2437           2 :         }
    2438           2 :         fe.readerRefs.Store(1)
    2439           2 :         return fe
    2440           2 : }
    2441             : 
    2442             : // maybeInduceWriteStall is called before performing a memtable rotation in
    2443             : // makeRoomForWrite. In some conditions, we prefer to stall the user's write
    2444             : // workload rather than continuing to accept writes that may result in resource
    2445             : // exhaustion or prohibitively slow reads.
    2446             : //
    2447             : // There are a couple reasons we might wait to rotate the memtable and
    2448             : // instead induce a write stall:
    2449             : //  1. If too many memtables have queued, we wait for a flush to finish before
    2450             : //     creating another memtable.
    2451             : //  2. If L0 read amplification has grown too high, we wait for compactions
    2452             : //     to reduce the read amplification before accepting more writes that will
    2453             : //     increase write pressure.
    2454             : //
    2455             : // maybeInduceWriteStall checks these stall conditions, and if present, waits
    2456             : // for them to abate.
    2457           2 : func (d *DB) maybeInduceWriteStall(b *Batch) {
    2458           2 :         stalled := false
    2459           2 :         // This function will call EventListener.WriteStallBegin at most once.  If
    2460           2 :         // it does call it, it will call EventListener.WriteStallEnd once before
    2461           2 :         // returning.
    2462           2 :         for {
    2463           2 :                 var size uint64
    2464           2 :                 for i := range d.mu.mem.queue {
    2465           2 :                         size += d.mu.mem.queue[i].totalBytes()
    2466           2 :                 }
    2467             :                 // If ElevateWriteStallThresholdForFailover is true, we give an
    2468             :                 // unlimited memory budget for memtables. This is simpler than trying to
    2469             :                 // configure an explicit value, given that memory resources can vary.
    2470             :                 // When using WAL failover in CockroachDB, an OOM risk is worth
    2471             :                 // tolerating for workloads that have a strict latency SLO. Also, an
    2472             :                 // unlimited budget here does not mean that the disk stall in the
    2473             :                 // primary will go unnoticed until the OOM -- CockroachDB is monitoring
    2474             :                 // disk stalls, and we expect it to fail the node after ~60s if the
    2475             :                 // primary is stalled.
    2476           2 :                 if size >= uint64(d.opts.MemTableStopWritesThreshold)*d.opts.MemTableSize &&
    2477           2 :                         !d.mu.log.manager.ElevateWriteStallThresholdForFailover() {
    2478           2 :                         // We have filled up the current memtable, but already queued memtables
    2479           2 :                         // are still flushing, so we wait.
    2480           2 :                         if !stalled {
    2481           2 :                                 stalled = true
    2482           2 :                                 d.opts.EventListener.WriteStallBegin(WriteStallBeginInfo{
    2483           2 :                                         Reason: "memtable count limit reached",
    2484           2 :                                 })
    2485           2 :                         }
    2486           2 :                         now := time.Now()
    2487           2 :                         d.mu.compact.cond.Wait()
    2488           2 :                         if b != nil {
    2489           2 :                                 b.commitStats.MemTableWriteStallDuration += time.Since(now)
    2490           2 :                         }
    2491           2 :                         continue
    2492             :                 }
    2493           2 :                 l0ReadAmp := d.mu.versions.currentVersion().L0Sublevels.ReadAmplification()
    2494           2 :                 if l0ReadAmp >= d.opts.L0StopWritesThreshold {
    2495           2 :                         // There are too many level-0 files, so we wait.
    2496           2 :                         if !stalled {
    2497           2 :                                 stalled = true
    2498           2 :                                 d.opts.EventListener.WriteStallBegin(WriteStallBeginInfo{
    2499           2 :                                         Reason: "L0 file count limit exceeded",
    2500           2 :                                 })
    2501           2 :                         }
    2502           2 :                         now := time.Now()
    2503           2 :                         d.mu.compact.cond.Wait()
    2504           2 :                         if b != nil {
    2505           2 :                                 b.commitStats.L0ReadAmpWriteStallDuration += time.Since(now)
    2506           2 :                         }
    2507           2 :                         continue
    2508             :                 }
    2509             :                 // Not stalled.
    2510           2 :                 if stalled {
    2511           2 :                         d.opts.EventListener.WriteStallEnd()
    2512           2 :                 }
    2513           2 :                 return
    2514             :         }
    2515             : }
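
The stall conditions above are driven by Options.MemTableStopWritesThreshold (together with Options.MemTableSize) and Options.L0StopWritesThreshold, and they are surfaced through the EventListener hooks invoked in this function. A minimal sketch of wiring those hooks into application logging, assuming the public pebble API (including the *EventListener field type); the numeric values below are arbitrary and purely illustrative:

        package main

        import (
                "log"

                "github.com/cockroachdb/pebble"
        )

        func main() {
                opts := &pebble.Options{
                        MemTableSize:                64 << 20, // 64 MiB per memtable (illustrative)
                        MemTableStopWritesThreshold: 4,        // stall once ~4 memtables are queued
                        L0StopWritesThreshold:       12,       // stall once L0 read amp reaches 12
                        EventListener: &pebble.EventListener{
                                WriteStallBegin: func(info pebble.WriteStallBeginInfo) {
                                        log.Printf("write stall begin: %s", info.Reason)
                                },
                                WriteStallEnd: func() {
                                        log.Printf("write stall end")
                                },
                        },
                }
                db, err := pebble.Open("demo-db", opts)
                if err != nil {
                        log.Fatal(err)
                }
                defer db.Close()
        }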
    2516             : 
    2517             : // makeRoomForWrite rotates the current mutable memtable, ensuring that the
    2518             : // resulting mutable memtable has room to hold the contents of the provided
    2519             : // Batch. The current memtable is rotated (marked as immutable) and a new
    2520             : // mutable memtable is allocated. It reserves space in the new memtable and adds
    2521             : // a reference to the memtable. The caller must later ensure that the memtable
    2522             : // is unreferenced. This memtable rotation also causes a WAL rotation (if the WAL is enabled).
    2523             : //
    2524             : // If the current memtable is not full but the caller wishes to trigger a
    2525             : // rotation regardless, the caller may pass a nil Batch, and no space in the
    2526             : // resulting mutable memtable will be reserved.
    2527             : //
    2528             : // Both DB.mu and commitPipeline.mu must be held by the caller. Note that DB.mu
    2529             : // may be released and reacquired.
    2530           2 : func (d *DB) makeRoomForWrite(b *Batch) error {
    2531           2 :         if b != nil && b.ingestedSSTBatch {
    2532           0 :                 panic("pebble: invalid function call")
    2533             :         }
    2534           2 :         d.maybeInduceWriteStall(b)
    2535           2 : 
    2536           2 :         var newLogNum base.DiskFileNum
    2537           2 :         var prevLogSize uint64
    2538           2 :         if !d.opts.DisableWAL {
    2539           2 :                 now := time.Now()
    2540           2 :                 newLogNum, prevLogSize = d.rotateWAL()
    2541           2 :                 if b != nil {
    2542           2 :                         b.commitStats.WALRotationDuration += time.Since(now)
    2543           2 :                 }
    2544             :         }
    2545           2 :         immMem := d.mu.mem.mutable
    2546           2 :         imm := d.mu.mem.queue[len(d.mu.mem.queue)-1]
    2547           2 :         imm.logSize = prevLogSize
    2548           2 : 
    2549           2 :         var logSeqNum uint64
    2550           2 :         var minSize uint64
    2551           2 :         if b != nil {
    2552           2 :                 logSeqNum = b.SeqNum()
    2553           2 :                 if b.flushable != nil {
    2554           2 :                         logSeqNum += uint64(b.Count())
    2555           2 :                         // The batch is too large to fit in the memtable so add it directly to
    2556           2 :                         // the immutable queue. The flushable batch is associated with the same
    2557           2 :                         // log as the immutable memtable, but logically occurs after it in
    2558           2 :                         // seqnum space. We ensure while flushing that the flushable batch
    2559           2 :                         // is flushed along with the previous memtable in the flushable
    2560           2 :                         // queue. See the top level comment in DB.flush1 to learn how this
    2561           2 :                         // is ensured.
    2562           2 :                         //
    2563           2 :                         // See DB.commitWrite for the special handling of log writes for large
    2564           2 :                         // batches. In particular, the large batch has already written to
    2565           2 :                         // imm.logNum.
    2566           2 :                         entry := d.newFlushableEntry(b.flushable, imm.logNum, b.SeqNum())
    2567           2 :                         // A flushable batch is, by definition, large. Reserve space from the
    2568           2 :                         // cache for it until it is flushed.
    2569           2 :                         entry.releaseMemAccounting = d.opts.Cache.Reserve(int(b.flushable.totalBytes()))
    2570           2 :                         d.mu.mem.queue = append(d.mu.mem.queue, entry)
    2571           2 :                 } else {
    2572           2 :                         minSize = b.memTableSize
    2573           2 :                 }
    2574           2 :         } else {
    2575           2 :                 // b == nil
    2576           2 :                 //
    2577           2 :                 // This is a manual forced flush.
    2578           2 :                 logSeqNum = d.mu.versions.logSeqNum.Load()
    2579           2 :                 imm.flushForced = true
    2580           2 :                 // If we are manually flushing and we used less than half of the bytes in
    2581           2 :                 // the memtable, don't increase the size for the next memtable. This
    2582           2 :                 // reduces memtable memory pressure when an application is frequently
    2583           2 :                 // manually flushing.
    2584           2 :                 if uint64(immMem.availBytes()) > immMem.totalBytes()/2 {
    2585           2 :                         d.mu.mem.nextSize = immMem.totalBytes()
    2586           2 :                 }
    2587             :         }
    2588           2 :         d.rotateMemtable(newLogNum, logSeqNum, immMem, minSize)
    2589           2 :         if b != nil && b.flushable == nil {
    2590           2 :                 err := d.mu.mem.mutable.prepare(b)
    2591           2 :                 // Reserving enough space for the batch after rotation must never fail.
    2592           2 :                 // We pass in a minSize that's equal to b.memTableSize to ensure that
    2593           2 :                 // memtable rotation allocates a memtable sufficiently large. We also
    2594           2 :                 // held d.commit.mu for the entirety of this function, ensuring that no
    2595           2 :                 // hold d.commit.mu for the entirety of this function, ensuring that no
    2596           2 :                 if err == arenaskl.ErrArenaFull {
    2597           0 :                         panic(errors.AssertionFailedf("memtable still full after rotation"))
    2598             :                 }
    2599           2 :                 return err
    2600             :         }
    2601           2 :         return nil
    2602             : }
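
As the b.flushable branch above shows, a batch too large to fit in a memtable is queued as its own flushable entry rather than being copied into the memtable. From the public API this is just an ordinary Apply; a minimal sketch, assuming the public Batch/Apply API, with purely illustrative sizes (whether the flushable path is actually taken depends on the configured memtable size):

        // commitLargeBatch builds a batch large enough that it may be committed
        // as a flushable batch rather than copied into the memtable, as described
        // in makeRoomForWrite above. Sizes are illustrative only.
        // Assumes: import "fmt" and "github.com/cockroachdb/pebble".
        func commitLargeBatch(db *pebble.DB) error {
                b := db.NewBatch()
                val := make([]byte, 1<<20) // 1 MiB values
                for i := 0; i < 256; i++ { // ~256 MiB total
                        if err := b.Set([]byte(fmt.Sprintf("bulk/%04d", i)), val, nil); err != nil {
                                return err
                        }
                }
                // Apply commits the batch through the normal commit pipeline.
                return db.Apply(b, pebble.Sync)
        }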
    2603             : 
    2604             : // Both DB.mu and commitPipeline.mu must be held by the caller.
    2605             : func (d *DB) rotateMemtable(
    2606             :         newLogNum base.DiskFileNum, logSeqNum uint64, prev *memTable, minSize uint64,
    2607           2 : ) {
    2608           2 :         // Create a new memtable, scheduling the previous one for flushing. We do
    2609           2 :         // this even if the previous memtable was empty because the DB.Flush
    2610           2 :         // mechanism is dependent on being able to wait for the empty memtable to
    2611           2 :         // flush. We can't just mark the empty memtable as flushed here because we
    2612           2 :         // also have to wait for all previous immutable tables to
    2613           2 :         // flush. Additionally, the memtable is tied to a particular WAL file and we
    2614           2 :         // want to go through the flush path in order to recycle that WAL file.
    2615           2 :         //
    2616           2 :         // NB: newLogNum corresponds to the WAL that contains mutations that are
    2617           2 :         // present in the new memtable. When immutable memtables are flushed to
    2618           2 :         // disk, a VersionEdit will be created telling the manifest the minimum
    2619           2 :         // unflushed log number (which will be the next one in d.mu.mem.mutable
    2620           2 :         // that was not flushed).
    2621           2 :         //
    2622           2 :         // NB: prev should be the current mutable memtable.
    2623           2 :         var entry *flushableEntry
    2624           2 :         d.mu.mem.mutable, entry = d.newMemTable(newLogNum, logSeqNum, minSize)
    2625           2 :         d.mu.mem.queue = append(d.mu.mem.queue, entry)
    2626           2 :         // d.logSize tracks the log size of the WAL file corresponding to the most
    2627           2 :         // recent flushable. The log size of the previous mutable memtable no longer
    2628           2 :         // applies to the current mutable memtable.
    2629           2 :         //
    2630           2 :         // It's tempting to perform this update in rotateWAL, but that would not be
    2631           2 :         // atomic with the enqueue of the new flushable. A call to DB.Metrics()
    2632           2 :         // could acquire DB.mu after the WAL has been rotated but before the new
    2633           2 :         // memtable has been appended; this would result in omitting the log size of
    2634           2 :         // the most recent flushable.
    2635           2 :         d.logSize.Store(0)
    2636           2 :         d.updateReadStateLocked(nil)
    2637           2 :         if prev.writerUnref() {
    2638           2 :                 d.maybeScheduleFlush()
    2639           2 :         }
    2640             : }
    2641             : 
    2642             : // rotateWAL creates a new write-ahead log, possibly recycling a previous WAL's
    2643             : // files. It returns the file number assigned to the new WAL, and the size of
    2644             : // the previous WAL file.
    2645             : //
    2646             : // Both DB.mu and commitPipeline.mu must be held by the caller. Note that DB.mu
    2647             : // may be released and reacquired.
    2648           2 : func (d *DB) rotateWAL() (newLogNum base.DiskFileNum, prevLogSize uint64) {
    2649           2 :         if d.opts.DisableWAL {
    2650           0 :                 panic("pebble: invalid function call")
    2651             :         }
    2652           2 :         jobID := d.newJobIDLocked()
    2653           2 :         newLogNum = d.mu.versions.getNextDiskFileNum()
    2654           2 : 
    2655           2 :         d.mu.Unlock()
    2656           2 :         // Close the previous log first. This writes an EOF trailer
    2657           2 :         // signifying the end of the file and syncs it to disk. We must
    2658           2 :         // close the previous log before linking the new log file,
    2659           2 :         // otherwise a crash could leave both logs with unclean tails, and
    2660           2 :         // Open will treat the previous log as corrupt.
    2661           2 :         offset, err := d.mu.log.writer.Close()
    2662           2 :         if err != nil {
    2663           0 :                 // What to do here? Stumbling on doesn't seem worthwhile. If we failed to
    2664           0 :                 // close the previous log it is possible we lost a write.
    2665           0 :                 panic(err)
    2666             :         }
    2667           2 :         prevLogSize = uint64(offset)
    2668           2 :         metrics := d.mu.log.writer.Metrics()
    2669           2 : 
    2670           2 :         d.mu.Lock()
    2671           2 :         if err := d.mu.log.metrics.LogWriterMetrics.Merge(&metrics); err != nil {
    2672           0 :                 d.opts.Logger.Errorf("metrics error: %s", err)
    2673           0 :         }
    2674             : 
    2675           2 :         d.mu.Unlock()
    2676           2 :         writer, err := d.mu.log.manager.Create(wal.NumWAL(newLogNum), int(jobID))
    2677           2 :         if err != nil {
    2678           0 :                 panic(err)
    2679             :         }
    2680             : 
    2681           2 :         d.mu.Lock()
    2682           2 :         d.mu.log.writer = writer
    2683           2 :         return newLogNum, prevLogSize
    2684             : }
    2685             : 
    2686           2 : func (d *DB) getEarliestUnflushedSeqNumLocked() uint64 {
    2687           2 :         seqNum := InternalKeySeqNumMax
    2688           2 :         for i := range d.mu.mem.queue {
    2689           2 :                 logSeqNum := d.mu.mem.queue[i].logSeqNum
    2690           2 :                 if seqNum > logSeqNum {
    2691           2 :                         seqNum = logSeqNum
    2692           2 :                 }
    2693             :         }
    2694           2 :         return seqNum
    2695             : }
    2696             : 
    2697           2 : func (d *DB) getInProgressCompactionInfoLocked(finishing *compaction) (rv []compactionInfo) {
    2698           2 :         for c := range d.mu.compact.inProgress {
    2699           2 :                 if len(c.flushing) == 0 && (finishing == nil || c != finishing) {
    2700           2 :                         info := compactionInfo{
    2701           2 :                                 versionEditApplied: c.versionEditApplied,
    2702           2 :                                 inputs:             c.inputs,
    2703           2 :                                 smallest:           c.smallest,
    2704           2 :                                 largest:            c.largest,
    2705           2 :                                 outputLevel:        -1,
    2706           2 :                         }
    2707           2 :                         if c.outputLevel != nil {
    2708           2 :                                 info.outputLevel = c.outputLevel.level
    2709           2 :                         }
    2710           2 :                         rv = append(rv, info)
    2711             :                 }
    2712             :         }
    2713           2 :         return
    2714             : }
    2715             : 
    2716           2 : func inProgressL0Compactions(inProgress []compactionInfo) []manifest.L0Compaction {
    2717           2 :         var compactions []manifest.L0Compaction
    2718           2 :         for _, info := range inProgress {
    2719           2 :                 // Skip in-progress compactions that have already committed; the L0
    2720           2 :                 // sublevels initialization code requires the set of in-progress
    2721           2 :                 // compactions to be consistent with the current version. Compactions
    2722           2 :                 // with versionEditApplied=true are already applied to the current
    2723           2 :                 // version but are performing cleanup without the database mutex.
    2724           2 :                 if info.versionEditApplied {
    2725           2 :                         continue
    2726             :                 }
    2727           2 :                 l0 := false
    2728           2 :                 for _, cl := range info.inputs {
    2729           2 :                         l0 = l0 || cl.level == 0
    2730           2 :                 }
    2731           2 :                 if !l0 {
    2732           2 :                         continue
    2733             :                 }
    2734           2 :                 compactions = append(compactions, manifest.L0Compaction{
    2735           2 :                         Smallest:  info.smallest,
    2736           2 :                         Largest:   info.largest,
    2737           2 :                         IsIntraL0: info.outputLevel == 0,
    2738           2 :                 })
    2739             :         }
    2740           2 :         return compactions
    2741             : }
    2742             : 
    2743             : // firstError returns the first non-nil error of err0 and err1, or nil if both
    2744             : // are nil.
    2745           2 : func firstError(err0, err1 error) error {
    2746           2 :         if err0 != nil {
    2747           2 :                 return err0
    2748           2 :         }
    2749           2 :         return err1
    2750             : }
    2751             : 
    2752             : // SetCreatorID sets the CreatorID which is needed in order to use shared objects.
    2753             : // Remote object usage is disabled until this method is called the first time.
    2754             : // Once set, the Creator ID is persisted and cannot change.
    2755             : //
    2756             : // Does nothing if remote storage (Options.Experimental.RemoteStorage) was not
    2757             : // set when the DB was opened, or if the DB is in read-only mode.
    2758           2 : func (d *DB) SetCreatorID(creatorID uint64) error {
    2759           2 :         if d.opts.Experimental.RemoteStorage == nil || d.opts.ReadOnly {
    2760           0 :                 return nil
    2761           0 :         }
    2762           2 :         return d.objProvider.SetCreatorID(objstorage.CreatorID(creatorID))
    2763             : }
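
A typical pattern is to call SetCreatorID once, immediately after Open, using a stable per-node identifier. A minimal sketch, assuming the DB was opened with Options.Experimental.RemoteStorage configured and that nodeID is a stable identifier managed by the application:

        // openShared opens a DB that uses remote/shared objects and persists
        // this node's creator ID right away. Assumes: import
        // "github.com/cockroachdb/pebble".
        func openShared(dir string, opts *pebble.Options, nodeID uint64) (*pebble.DB, error) {
                db, err := pebble.Open(dir, opts)
                if err != nil {
                        return nil, err
                }
                // Per the method documentation above, this is a no-op if remote
                // storage is not configured or the DB is read-only.
                if err := db.SetCreatorID(nodeID); err != nil {
                        db.Close()
                        return nil, err
                }
                return db, nil
        }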
    2764             : 
    2765             : // KeyStatistics keeps track of the number of keys that have been pinned by a
    2766             : // snapshot as well as counts of the different key kinds in the lsm.
    2767             : //
    2768             : // As an example of using the accumulated stats when we only have sets and
    2769             : // dels, say the counts are represented as del_count, set_count,
    2770             : // del_latest_count, set_latest_count, and snapshot_pinned_count. Then:
    2771             : //
    2772             : //   - del_latest_count + set_latest_count is the number of unique user keys
    2773             : //     (unique).
    2774             : //
    2775             : //   - set_latest_count is the number of live unique user keys (live_unique).
    2776             : //
    2777             : //   - Garbage is del_count + set_count - live_unique.
    2778             : //
    2779             : //   - If everything were in the LSM, del_count+set_count-snapshot_pinned_count
    2780             : //     would also be the number of unique user keys (note that
    2781             : //     snapshot_pinned_count is counting something different -- see comment below).
    2782             : //     But snapshot_pinned_count only counts keys in the LSM so the excess here
    2783             : //     must be keys in memtables.
    2784             : type KeyStatistics struct {
    2785             :         // TODO(sumeer): the SnapshotPinned* are incorrect in that these older
    2786             :         // versions can be in a different level. Either fix the accounting or
    2787             :         // rename these fields.
    2788             : 
    2789             :         // SnapshotPinnedKeys represents obsolete keys that cannot be elided during
    2790             :         // a compaction, because they are required by an open snapshot.
    2791             :         SnapshotPinnedKeys int
    2792             :         // SnapshotPinnedKeysBytes is the total number of bytes of all snapshot
    2793             :         // pinned keys.
    2794             :         SnapshotPinnedKeysBytes uint64
    2795             :         // KindsCount is the count for each kind of key. It includes point keys,
    2796             :         // range deletes and range keys.
    2797             :         KindsCount [InternalKeyKindMax + 1]int
    2798             :         // LatestKindsCount is the count for each kind of key when it is the latest
    2799             :         // kind for a user key. It is only populated for point keys.
    2800             :         LatestKindsCount [InternalKeyKindMax + 1]int
    2801             : }
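
To make the relationships in the KeyStatistics comment concrete, a small worked example with hypothetical counts:

        // exampleKeyStatsArithmetic works through the doc comment above with
        // hypothetical, purely illustrative counts. Assumes: import "fmt".
        func exampleKeyStatsArithmetic() {
                const (
                        delCount       = 4  // del_count
                        setCount       = 10 // set_count
                        delLatestCount = 2  // del_latest_count
                        setLatestCount = 6  // set_latest_count
                )
                unique := delLatestCount + setLatestCount   // 2 + 6 = 8 unique user keys
                liveUnique := setLatestCount                // 6 live unique user keys
                garbage := delCount + setCount - liveUnique // 4 + 10 - 6 = 8 garbage versions
                fmt.Println(unique, liveUnique, garbage)    // 8 6 8
        }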
    2802             : 
    2803             : // LSMKeyStatistics is used by DB.ScanStatistics.
    2804             : type LSMKeyStatistics struct {
    2805             :         Accumulated KeyStatistics
    2806             :         // Levels contains statistics only for point keys. Range deletions and range keys will
    2807             :         // appear in Accumulated but not Levels.
    2808             :         Levels [numLevels]KeyStatistics
    2809             :         // BytesRead represents the logical, pre-compression size of keys and values read.
    2810             :         BytesRead uint64
    2811             : }
    2812             : 
    2813             : // ScanStatisticsOptions is used by DB.ScanStatistics.
    2814             : type ScanStatisticsOptions struct {
    2815             :         // LimitBytesPerSecond indicates the maximum number of bytes that may be read
    2816             :         // per second by the internal scan.
    2817             :         // A value of 0 indicates that there is no limit set.
    2818             :         LimitBytesPerSecond int64
    2819             : }
    2820             : 
    2821             : // ScanStatistics returns the count of different key kinds within the lsm for a
    2822             : // key span [lower, upper) as well as the number of snapshot-pinned keys.
    2823             : func (d *DB) ScanStatistics(
    2824             :         ctx context.Context, lower, upper []byte, opts ScanStatisticsOptions,
    2825           1 : ) (LSMKeyStatistics, error) {
    2826           1 :         stats := LSMKeyStatistics{}
    2827           1 :         var prevKey InternalKey
    2828           1 :         var rateLimitFunc func(key *InternalKey, val LazyValue) error
    2829           1 :         tb := tokenbucket.TokenBucket{}
    2830           1 : 
    2831           1 :         if opts.LimitBytesPerSecond != 0 {
    2832           0 :                 // Each "token" roughly corresponds to a byte that was read.
    2833           0 :                 tb.Init(tokenbucket.TokensPerSecond(opts.LimitBytesPerSecond), tokenbucket.Tokens(1024))
    2834           0 :                 rateLimitFunc = func(key *InternalKey, val LazyValue) error {
    2835           0 :                         return tb.WaitCtx(ctx, tokenbucket.Tokens(key.Size()+val.Len()))
    2836           0 :                 }
    2837             :         }
    2838             : 
    2839           1 :         scanInternalOpts := &scanInternalOptions{
    2840           1 :                 visitPointKey: func(key *InternalKey, value LazyValue, iterInfo IteratorLevel) error {
    2841           1 :                         // If the previous key is equal to the current point key, the current key was
    2842           1 :                         // pinned by a snapshot.
    2843           1 :                         size := uint64(key.Size())
    2844           1 :                         kind := key.Kind()
    2845           1 :                         sameKey := d.equal(prevKey.UserKey, key.UserKey)
    2846           1 :                         if iterInfo.Kind == IteratorLevelLSM && sameKey {
    2847           1 :                                 stats.Levels[iterInfo.Level].SnapshotPinnedKeys++
    2848           1 :                                 stats.Levels[iterInfo.Level].SnapshotPinnedKeysBytes += size
    2849           1 :                                 stats.Accumulated.SnapshotPinnedKeys++
    2850           1 :                                 stats.Accumulated.SnapshotPinnedKeysBytes += size
    2851           1 :                         }
    2852           1 :                         if iterInfo.Kind == IteratorLevelLSM {
    2853           1 :                                 stats.Levels[iterInfo.Level].KindsCount[kind]++
    2854           1 :                         }
    2855           1 :                         if !sameKey {
    2856           1 :                                 if iterInfo.Kind == IteratorLevelLSM {
    2857           1 :                                         stats.Levels[iterInfo.Level].LatestKindsCount[kind]++
    2858           1 :                                 }
    2859           1 :                                 stats.Accumulated.LatestKindsCount[kind]++
    2860             :                         }
    2861             : 
    2862           1 :                         stats.Accumulated.KindsCount[kind]++
    2863           1 :                         prevKey.CopyFrom(*key)
    2864           1 :                         stats.BytesRead += uint64(key.Size() + value.Len())
    2865           1 :                         return nil
    2866             :                 },
    2867           0 :                 visitRangeDel: func(start, end []byte, seqNum uint64) error {
    2868           0 :                         stats.Accumulated.KindsCount[InternalKeyKindRangeDelete]++
    2869           0 :                         stats.BytesRead += uint64(len(start) + len(end))
    2870           0 :                         return nil
    2871           0 :                 },
    2872           0 :                 visitRangeKey: func(start, end []byte, keys []rangekey.Key) error {
    2873           0 :                         stats.BytesRead += uint64(len(start) + len(end))
    2874           0 :                         for _, key := range keys {
    2875           0 :                                 stats.Accumulated.KindsCount[key.Kind()]++
    2876           0 :                                 stats.BytesRead += uint64(len(key.Value) + len(key.Suffix))
    2877           0 :                         }
    2878           0 :                         return nil
    2879             :                 },
    2880             :                 includeObsoleteKeys: true,
    2881             :                 IterOptions: IterOptions{
    2882             :                         KeyTypes:   IterKeyTypePointsAndRanges,
    2883             :                         LowerBound: lower,
    2884             :                         UpperBound: upper,
    2885             :                 },
    2886             :                 rateLimitFunc: rateLimitFunc,
    2887             :         }
    2888           1 :         iter, err := d.newInternalIter(ctx, snapshotIterOpts{}, scanInternalOpts)
    2889           1 :         if err != nil {
    2890           0 :                 return LSMKeyStatistics{}, err
    2891           0 :         }
    2892           1 :         defer iter.close()
    2893           1 : 
    2894           1 :         err = scanInternalImpl(ctx, lower, upper, iter, scanInternalOpts)
    2895           1 : 
    2896           1 :         if err != nil {
    2897           0 :                 return LSMKeyStatistics{}, err
    2898           0 :         }
    2899             : 
    2900           1 :         return stats, nil
    2901             : }
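
A minimal usage sketch for ScanStatistics, using the signature above; the key bounds and the 10 MiB/s rate limit are arbitrary illustrative values:

        // scanStats collects key-kind and snapshot-pinned statistics for the span
        // ["a", "z"), limiting the scan to roughly 10 MiB/s of logical reads.
        // Assumes: import "context", "fmt" and "github.com/cockroachdb/pebble".
        func scanStats(ctx context.Context, db *pebble.DB) error {
                stats, err := db.ScanStatistics(ctx, []byte("a"), []byte("z"),
                        pebble.ScanStatisticsOptions{LimitBytesPerSecond: 10 << 20})
                if err != nil {
                        return err
                }
                fmt.Printf("snapshot-pinned: %d keys (%d bytes), logical bytes read: %d\n",
                        stats.Accumulated.SnapshotPinnedKeys,
                        stats.Accumulated.SnapshotPinnedKeysBytes,
                        stats.BytesRead)
                return nil
        }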
    2902             : 
    2903             : // ObjProvider returns the objstorage.Provider for this database. Meant to be
    2904             : // used for internal purposes only.
    2905           2 : func (d *DB) ObjProvider() objstorage.Provider {
    2906           2 :         return d.objProvider
    2907           2 : }
    2908             : 
    2909           1 : func (d *DB) checkVirtualBounds(m *fileMetadata) {
    2910           1 :         if !invariants.Enabled {
    2911           0 :                 return
    2912           0 :         }
    2913             : 
    2914           1 :         objMeta, err := d.objProvider.Lookup(fileTypeTable, m.FileBacking.DiskFileNum)
    2915           1 :         if err != nil {
    2916           0 :                 panic(err)
    2917             :         }
    2918           1 :         if objMeta.IsExternal() {
    2919           0 :                 // Nothing to do; bounds are expected to be loose.
    2920           0 :                 return
    2921           0 :         }
    2922             : 
    2923           1 :         iters, err := d.newIters(context.TODO(), m, nil, internalIterOpts{}, iterPointKeys|iterRangeDeletions|iterRangeKeys)
    2924           1 :         if err != nil {
    2925           0 :                 panic(errors.Wrap(err, "pebble: error creating iterators"))
    2926             :         }
    2927           1 :         defer iters.CloseAll()
    2928           1 : 
    2929           1 :         if m.HasPointKeys {
    2930           1 :                 pointIter := iters.Point()
    2931           1 :                 rangeDelIter := iters.RangeDeletion()
    2932           1 : 
    2933           1 :                 // Check that the lower bound is tight.
    2934           1 :                 pointKV := pointIter.First()
    2935           1 :                 rangeDel, err := rangeDelIter.First()
    2936           1 :                 if err != nil {
    2937           0 :                         panic(err)
    2938             :                 }
    2939           1 :                 if (rangeDel == nil || d.cmp(rangeDel.SmallestKey().UserKey, m.SmallestPointKey.UserKey) != 0) &&
    2940           1 :                         (pointKV == nil || d.cmp(pointKV.K.UserKey, m.SmallestPointKey.UserKey) != 0) {
    2941           0 :                         panic(errors.Newf("pebble: virtual sstable %s lower point key bound is not tight", m.FileNum))
    2942             :                 }
    2943             : 
    2944             :                 // Check that the upper bound is tight.
    2945           1 :                 pointKV = pointIter.Last()
    2946           1 :                 rangeDel, err = rangeDelIter.Last()
    2947           1 :                 if err != nil {
    2948           0 :                         panic(err)
    2949             :                 }
    2950           1 :                 if (rangeDel == nil || d.cmp(rangeDel.LargestKey().UserKey, m.LargestPointKey.UserKey) != 0) &&
    2951           1 :                         (pointKV == nil || d.cmp(pointKV.K.UserKey, m.LargestPointKey.UserKey) != 0) {
    2952           0 :                         panic(errors.Newf("pebble: virtual sstable %s upper point key bound is not tight", m.FileNum))
    2953             :                 }
    2954             : 
    2955             :                 // Check that iterator keys are within bounds.
    2956           1 :                 for kv := pointIter.First(); kv != nil; kv = pointIter.Next() {
    2957           1 :                         if d.cmp(kv.K.UserKey, m.SmallestPointKey.UserKey) < 0 || d.cmp(kv.K.UserKey, m.LargestPointKey.UserKey) > 0 {
    2958           0 :                                 panic(errors.Newf("pebble: virtual sstable %s point key %s is not within bounds", m.FileNum, kv.K.UserKey))
    2959             :                         }
    2960             :                 }
    2961           1 :                 s, err := rangeDelIter.First()
    2962           1 :                 for ; s != nil; s, err = rangeDelIter.Next() {
    2963           1 :                         if d.cmp(s.SmallestKey().UserKey, m.SmallestPointKey.UserKey) < 0 {
    2964           0 :                         panic(errors.Newf("pebble: virtual sstable %s range deletion start key %s is not within bounds", m.FileNum, s.SmallestKey().UserKey))
    2965             :                         }
    2966           1 :                         if d.cmp(s.LargestKey().UserKey, m.LargestPointKey.UserKey) > 0 {
    2967           0 :                         panic(errors.Newf("pebble: virtual sstable %s range deletion end key %s is not within bounds", m.FileNum, s.LargestKey().UserKey))
    2968             :                         }
    2969             :                 }
    2970           1 :                 if err != nil {
    2971           0 :                         panic(err)
    2972             :                 }
    2973             :         }
    2974             : 
    2975           1 :         if !m.HasRangeKeys {
    2976           1 :                 return
    2977           1 :         }
    2978           0 :         rangeKeyIter := iters.RangeKey()
    2979           0 : 
    2980           0 :         // Check that the lower bound is tight.
    2981           0 :         if s, err := rangeKeyIter.First(); err != nil {
    2982           0 :                 panic(err)
    2983           0 :         } else if d.cmp(s.SmallestKey().UserKey, m.SmallestRangeKey.UserKey) != 0 {
    2984           0 :                 panic(errors.Newf("pebble: virtual sstable %s lower range key bound is not tight", m.FileNum))
    2985             :         }
    2986             : 
    2987             :         // Check that upper bound is tight.
    2988           0 :         if s, err := rangeKeyIter.Last(); err != nil {
    2989           0 :                 panic(err)
    2990           0 :         } else if d.cmp(s.LargestKey().UserKey, m.LargestRangeKey.UserKey) != 0 {
    2991           0 :                 panic(errors.Newf("pebble: virtual sstable %s upper range key bound is not tight", m.FileNum))
    2992             :         }
    2993             : 
    2994           0 :         s, err := rangeKeyIter.First()
    2995           0 :         for ; s != nil; s, err = rangeKeyIter.Next() {
    2996           0 :                 if d.cmp(s.SmallestKey().UserKey, m.SmallestRangeKey.UserKey) < 0 {
    2997           0 :                 panic(errors.Newf("pebble: virtual sstable %s range key start key %s is not within bounds", m.FileNum, s.SmallestKey().UserKey))
    2998             :                 }
    2999           0 :                 if d.cmp(s.LargestKey().UserKey, m.LargestRangeKey.UserKey) > 0 {
    3000           0 :                 panic(errors.Newf("pebble: virtual sstable %s range key end key %s is not within bounds", m.FileNum, s.LargestKey().UserKey))
    3001             :                 }
    3002             :         }
    3003           0 :         if err != nil {
    3004           0 :                 panic(err)
    3005             :         }
    3006             : }
    3007             : 
    3008             : // DebugString returns a debugging string describing the LSM.
    3009           0 : func (d *DB) DebugString() string {
    3010           0 :         d.mu.Lock()
    3011           0 :         defer d.mu.Unlock()
    3012           0 :         return d.mu.versions.currentVersion().DebugString()
    3013           0 : }

Generated by: LCOV version 1.14