LCOV - code coverage report
Current view:  top level - pebble - table_stats.go (source / functions)
Test:          2025-07-07 08:18Z 6f57a213 - meta test only.lcov
Test Date:     2025-07-07 08:20:19
Coverage:      Lines: 88.5 % (632 of 714 hit)    Functions: - (0 of 0)

            Line data    Source code
       1              : // Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
       2              : // of this source code is governed by a BSD-style license that can be found in
       3              : // the LICENSE file.
       4              : 
       5              : package pebble
       6              : 
       7              : import (
       8              :         "context"
       9              :         "fmt"
      10              :         "math"
      11              :         "slices"
      12              :         "time"
      13              : 
      14              :         "github.com/cockroachdb/crlib/crtime"
      15              :         "github.com/cockroachdb/errors"
      16              :         "github.com/cockroachdb/pebble/internal/base"
      17              :         "github.com/cockroachdb/pebble/internal/invariants"
      18              :         "github.com/cockroachdb/pebble/internal/keyspan"
      19              :         "github.com/cockroachdb/pebble/internal/keyspan/keyspanimpl"
      20              :         "github.com/cockroachdb/pebble/internal/manifest"
      21              :         "github.com/cockroachdb/pebble/sstable"
      22              :         "github.com/cockroachdb/pebble/sstable/block"
      23              :         "github.com/cockroachdb/redact"
      24              : )
      25              : 
      26              : // In-memory statistics about tables help inform compaction picking, but may
      27              : // be expensive to calculate or load from disk. Every time a database is
      28              : // opened, these statistics must be reloaded or recalculated. To minimize
      29              : // impact on user activity and compactions, we load these statistics
      30              : // asynchronously in the background and store loaded statistics in each
      31              : // table's *TableMetadata.
      32              : //
      33              : // This file implements the asynchronous loading of statistics by maintaining
      34              : // a list of files that require statistics, alongside their LSM levels.
      35              : // Whenever new files are added to the LSM, the files are appended to
      36              : // d.mu.tableStats.pending. If a stats collection job is not currently
      37              : // running, one is started in a separate goroutine.
      38              : //
      39              : // The stats collection job grabs and clears the pending list, computes table
      40              : // statistics relative to the current readState and updates the tables' file
      41              : // metadata. New pending files may accumulate during a stats collection job,
      42              : // so a completing job triggers a new job if necessary. Only one job runs at a
      43              : // time.
      44              : //
      45              : // When an existing database is opened, all files lack in-memory statistics.
      46              : // These files' stats are loaded incrementally whenever the pending list is
      47              : // empty by scanning a current readState for files missing statistics. Once a
      48              : // job completes a scan without finding any remaining files without
      49              : // statistics, it flips a `loadedInitial` flag. From then on, the stats
      50              : // collection job only needs to load statistics for new files appended to the
      51              : // pending list.
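                       : //
                       : // A condensed sketch of the flow (illustrative, not an exact call graph):
                       : //
                       : //   flush/ingest/compaction
                       : //     -> updateTableStatsLocked        (append to d.mu.tableStats.pending)
                       : //     -> maybeCollectTableStatsLocked  (start a job if none is running)
                       : //     -> go collectTableStats          (drain pending or scan the readState,
                       : //                                       then publish stats under DB.mu)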
      52              : 
      53            1 : func (d *DB) maybeCollectTableStatsLocked() {
      54            1 :         if d.shouldCollectTableStatsLocked() {
      55            1 :                 go d.collectTableStats()
      56            1 :         }
      57              : }
      58              : 
      59              : // updateTableStatsLocked is called when new files are introduced, after the
      60              : // read state has been updated. It may trigger a new stat collection.
      61              : // DB.mu must be locked when calling.
      62            1 : func (d *DB) updateTableStatsLocked(newTables []manifest.NewTableEntry) {
      63            1 :         var needStats bool
      64            1 :         for _, nf := range newTables {
      65            1 :                 if !nf.Meta.StatsValid() {
      66            1 :                         needStats = true
      67            1 :                         break
      68              :                 }
      69              :         }
      70            1 :         if !needStats {
      71            1 :                 return
      72            1 :         }
      73              : 
      74            1 :         d.mu.tableStats.pending = append(d.mu.tableStats.pending, newTables...)
      75            1 :         d.maybeCollectTableStatsLocked()
      76              : }
      77              : 
      78            1 : func (d *DB) shouldCollectTableStatsLocked() bool {
      79            1 :         return !d.mu.tableStats.loading &&
      80            1 :                 d.closed.Load() == nil &&
      81            1 :                 !d.opts.DisableTableStats &&
      82            1 :                 (len(d.mu.tableStats.pending) > 0 || !d.mu.tableStats.loadedInitial)
      83            1 : }
      84              : 
      85              : // collectTableStats runs a table stats collection job, returning true if the
      86              : // invocation did the collection work, false otherwise (e.g. if another job was
      87              : // already running).
      88            1 : func (d *DB) collectTableStats() bool {
      89            1 :         const maxTableStatsPerScan = 50
      90            1 : 
      91            1 :         d.mu.Lock()
      92            1 :         if !d.shouldCollectTableStatsLocked() {
      93            1 :                 d.mu.Unlock()
      94            1 :                 return false
      95            1 :         }
      96            1 :         ctx := context.Background()
      97            1 : 
      98            1 :         pending := d.mu.tableStats.pending
      99            1 :         d.mu.tableStats.pending = nil
     100            1 :         d.mu.tableStats.loading = true
     101            1 :         jobID := d.newJobIDLocked()
     102            1 :         loadedInitial := d.mu.tableStats.loadedInitial
     103            1 :         // Drop DB.mu before performing IO.
     104            1 :         d.mu.Unlock()
     105            1 : 
     106            1 :         // Every run of collectTableStats either collects stats from the pending
     107            1 :         // list (if non-empty) or from scanning the version (loadedInitial is
     108            1 :         // false). This job only runs if at least one of those conditions holds.
     109            1 : 
     110            1 :         // Grab a read state to scan for tables.
     111            1 :         rs := d.loadReadState()
     112            1 :         var collected []collectedStats
     113            1 :         var hints []deleteCompactionHint
     114            1 :         if len(pending) > 0 {
     115            1 :                 collected, hints = d.loadNewFileStats(ctx, rs, pending)
     116            1 :         } else {
     117            1 :                 var moreRemain bool
     118            1 :                 var buf [maxTableStatsPerScan]collectedStats
     119            1 :                 collected, hints, moreRemain = d.scanReadStateTableStats(ctx, rs, buf[:0])
     120            1 :                 loadedInitial = !moreRemain
     121            1 :         }
     122            1 :         rs.unref()
     123            1 : 
     124            1 :         // Update the TableMetadata with the loaded stats while holding d.mu.
     125            1 :         d.mu.Lock()
     126            1 :         defer d.mu.Unlock()
     127            1 :         d.mu.tableStats.loading = false
     128            1 :         if loadedInitial && !d.mu.tableStats.loadedInitial {
     129            1 :                 d.mu.tableStats.loadedInitial = loadedInitial
     130            1 :                 d.opts.EventListener.TableStatsLoaded(TableStatsInfo{
     131            1 :                         JobID: int(jobID),
     132            1 :                 })
     133            1 :         }
     134              : 
     135            1 :         maybeCompact := false
     136            1 :         for _, c := range collected {
     137            1 :                 c.TableMetadata.Stats = c.TableStats
     138            1 :                 maybeCompact = maybeCompact || tableTombstoneCompensation(c.TableMetadata) > 0
     139            1 :                 sanityCheckStats(c.TableMetadata, d.opts.Logger, "collected stats")
     140            1 :                 c.TableMetadata.StatsMarkValid()
     141            1 :         }
     142              : 
     143            1 :         d.mu.tableStats.cond.Broadcast()
     144            1 :         d.maybeCollectTableStatsLocked()
     145            1 :         if len(hints) > 0 && !d.opts.private.disableDeleteOnlyCompactions {
     146            1 :                 // Verify that all of the hint tombstones' files still exist in the
     147            1 :                 // current version. Otherwise, the tombstone itself may have been
     148            1 :                 // compacted into L6 and more recent keys may have had their sequence
     149            1 :                 // numbers zeroed.
     150            1 :                 //
     151            1 :                 // Note that it's possible that the tombstone file is being compacted
     152            1 :                 // presently. In that case, the file will be present in v. When the
     153            1 :                 // compaction finishes compacting the tombstone file, it will detect
     154            1 :                 // and clear the hint.
     155            1 :                 //
     156            1 :                 // See DB.maybeUpdateDeleteCompactionHints.
     157            1 :                 v := d.mu.versions.currentVersion()
     158            1 :                 keepHints := hints[:0]
     159            1 :                 for _, h := range hints {
     160            1 :                         if v.Contains(h.tombstoneLevel, h.tombstoneFile) {
     161            1 :                                 keepHints = append(keepHints, h)
     162            1 :                         }
     163              :                 }
     164            1 :                 d.mu.compact.deletionHints = append(d.mu.compact.deletionHints, keepHints...)
     165              :         }
     166            1 :         if maybeCompact {
     167            1 :                 d.maybeScheduleCompaction()
     168            1 :         }
     169            1 :         return true
     170              : }
     171              : 
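                       : // collectedStats pairs a table's metadata with its freshly computed stats.
                       : // Stats are computed without holding DB.mu; they are copied into the
                       : // TableMetadata only after the mutex is reacquired.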
     172              : type collectedStats struct {
     173              :         *manifest.TableMetadata
     174              :         manifest.TableStats
     175              : }
     176              : 
     177              : func (d *DB) loadNewFileStats(
     178              :         ctx context.Context, rs *readState, pending []manifest.NewTableEntry,
     179            1 : ) ([]collectedStats, []deleteCompactionHint) {
     180            1 :         var hints []deleteCompactionHint
     181            1 :         collected := make([]collectedStats, 0, len(pending))
     182            1 :         for _, nf := range pending {
     183            1 :                 // A file's stats might have been populated by an earlier call to
     184            1 :                 // loadNewFileStats if the file was moved.
     185            1 :                 // NB: We're not holding d.mu which protects f.Stats, but only
     186            1 :                 // collectTableStats updates f.Stats for active files, and we
     187            1 :                 // ensure only one goroutine runs it at a time through
     188            1 :                 // d.mu.tableStats.loading.
     189            1 :                 if nf.Meta.StatsValid() {
     190            1 :                         continue
     191              :                 }
     192              : 
     193              :                 // The file isn't guaranteed to still be live in the readState's
     194              :                 // version. It may have been deleted or moved. Skip it if it's not in
     195              :                 // the expected level.
     196            1 :                 if !rs.current.Contains(nf.Level, nf.Meta) {
     197            1 :                         continue
     198              :                 }
     199              : 
     200            1 :                 stats, newHints, err := d.loadTableStats(ctx, rs.current, nf.Level, nf.Meta)
     201            1 :                 if err != nil {
     202            0 :                         d.opts.EventListener.BackgroundError(err)
     203            0 :                         continue
     204              :                 }
     205              :                 // NB: We don't update the TableMetadata yet, because we aren't holding
     206              :                 // DB.mu. We'll copy it to the TableMetadata after we're finished with
     207              :                 // IO.
     208            1 :                 collected = append(collected, collectedStats{
     209            1 :                         TableMetadata: nf.Meta,
     210            1 :                         TableStats:    stats,
     211            1 :                 })
     212            1 :                 hints = append(hints, newHints...)
     213              :         }
     214            1 :         return collected, hints
     215              : }
     216              : 
     217              : // scanReadStateTableStats is run by an active stat collection job when there
     218              : // are no pending new files, but there might be files that existed at Open for
     219              : // which we haven't loaded table stats.
     220              : func (d *DB) scanReadStateTableStats(
     221              :         ctx context.Context, rs *readState, fill []collectedStats,
     222            1 : ) ([]collectedStats, []deleteCompactionHint, bool) {
     223            1 :         moreRemain := false
     224            1 :         var hints []deleteCompactionHint
     225            1 :         sizesChecked := make(map[base.DiskFileNum]struct{})
     226            1 :         for l, levelMetadata := range rs.current.Levels {
     227            1 :                 for f := range levelMetadata.All() {
     228            1 :                         // NB: We're not holding d.mu which protects f.Stats, but only the
     229            1 :                         // active stats collection job updates f.Stats for active files,
     230            1 :                         // and we ensure only one goroutine runs it at a time through
     231            1 :                         // d.mu.tableStats.loading. This makes it safe to read validity
     232            1 :                         // through f.Stats.ValidLocked despite not holding d.mu.
     233            1 :                         if f.StatsValid() {
     234            1 :                                 continue
     235              :                         }
     236              : 
     237              :                         // Limit how much work we do per read state. The older the read
     238              :                         // state is, the higher the likelihood files are no longer being
     239              :                         // used in the current version. If we've exhausted our allowance,
     240              :                         // return true for the last return value to signal there's more
     241              :                         // work to do.
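                       :                         //
                       :                         // For example, with maxTableStatsPerScan = 50, each job
                       :                         // loads stats for at most 50 files from this read state;
                       :                         // the completing job then re-triggers itself via
                       :                         // maybeCollectTableStatsLocked to pick up a fresher one.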
     242            1 :                         if len(fill) == cap(fill) {
     243            1 :                                 moreRemain = true
     244            1 :                                 return fill, hints, moreRemain
     245            1 :                         }
     246              : 
      247              :                         // If the file is remote and not SharedForeign, check that its size matches
      248              :                         // the size recorded in the manifest; checkConsistency skips over remote files.
     249              :                         //
     250              :                         // SharedForeign and External files are skipped as their sizes are allowed
     251              :                         // to have a mismatch; the size stored in the TableBacking is just the part
     252              :                         // of the file that is referenced by this Pebble instance, not the size of
     253              :                         // the whole object.
     254            1 :                         objMeta, err := d.objProvider.Lookup(base.FileTypeTable, f.TableBacking.DiskFileNum)
     255            1 :                         if err != nil {
     256            0 :                                 // Set `moreRemain` so we'll try again.
     257            0 :                                 moreRemain = true
     258            0 :                                 d.opts.EventListener.BackgroundError(err)
     259            0 :                                 continue
     260              :                         }
     261              : 
     262            1 :                         shouldCheckSize := objMeta.IsRemote() &&
     263            1 :                                 !d.objProvider.IsSharedForeign(objMeta) &&
     264            1 :                                 !objMeta.IsExternal()
     265            1 :                         if _, ok := sizesChecked[f.TableBacking.DiskFileNum]; !ok && shouldCheckSize {
     266            1 :                                 size, err := d.objProvider.Size(objMeta)
     267            1 :                                 fileSize := f.TableBacking.Size
     268            1 :                                 if err != nil {
     269            0 :                                         moreRemain = true
     270            0 :                                         d.opts.EventListener.BackgroundError(err)
     271            0 :                                         continue
     272              :                                 }
     273            1 :                                 if size != int64(fileSize) {
     274            0 :                                         err := errors.Errorf(
     275            0 :                                                 "during consistency check in loadTableStats: L%d: %s: object size mismatch (%s): %d (provider) != %d (MANIFEST)",
     276            0 :                                                 errors.Safe(l), f.TableNum, d.objProvider.Path(objMeta),
     277            0 :                                                 errors.Safe(size), errors.Safe(fileSize))
     278            0 :                                         d.opts.EventListener.BackgroundError(err)
     279            0 :                                         d.opts.Logger.Fatalf("%s", err)
     280            0 :                                 }
     281              : 
     282            1 :                                 sizesChecked[f.TableBacking.DiskFileNum] = struct{}{}
     283              :                         }
     284              : 
     285            1 :                         stats, newHints, err := d.loadTableStats(ctx, rs.current, l, f)
     286            1 :                         if err != nil {
     287            0 :                                 // Set `moreRemain` so we'll try again.
     288            0 :                                 moreRemain = true
     289            0 :                                 d.opts.EventListener.BackgroundError(err)
     290            0 :                                 continue
     291              :                         }
     292            1 :                         fill = append(fill, collectedStats{
     293            1 :                                 TableMetadata: f,
     294            1 :                                 TableStats:    stats,
     295            1 :                         })
     296            1 :                         hints = append(hints, newHints...)
     297              :                 }
     298              :         }
     299            1 :         return fill, hints, moreRemain
     300              : }
     301              : 
     302              : func (d *DB) loadTableStats(
     303              :         ctx context.Context, v *manifest.Version, level int, meta *manifest.TableMetadata,
     304            1 : ) (manifest.TableStats, []deleteCompactionHint, error) {
     305            1 :         var stats manifest.TableStats
     306            1 :         var compactionHints []deleteCompactionHint
     307            1 : 
     308            1 :         err := d.fileCache.withReader(
     309            1 :                 ctx, block.NoReadEnv, meta, func(r *sstable.Reader, env sstable.ReadEnv) (err error) {
     310            1 :                         loadedProps, err := r.ReadPropertiesBlock(ctx, nil /* buffer pool */)
     311            1 :                         if err != nil {
     312            0 :                                 return err
     313            0 :                         }
     314            1 :                         props := loadedProps.CommonProperties
     315            1 :                         if meta.Virtual {
     316            1 :                                 props = loadedProps.GetScaledProperties(meta.TableBacking.Size, meta.Size)
     317            1 :                         }
     318            1 :                         stats.NumEntries = props.NumEntries
     319            1 :                         stats.NumDeletions = props.NumDeletions
     320            1 :                         stats.NumRangeKeySets = props.NumRangeKeySets
     321            1 :                         stats.ValueBlocksSize = props.ValueBlocksSize
     322            1 :                         stats.RawKeySize = props.RawKeySize
     323            1 :                         stats.RawValueSize = props.RawValueSize
     324            1 :                         stats.CompressionType = block.CompressionProfileByName(props.CompressionName)
     325            1 :                         if props.NumDataBlocks > 0 {
     326            1 :                                 stats.TombstoneDenseBlocksRatio = float64(props.NumTombstoneDenseBlocks) / float64(props.NumDataBlocks)
     327            1 :                         }
     328              : 
     329            1 :                         if props.NumPointDeletions() > 0 {
     330            1 :                                 if err = d.loadTablePointKeyStats(ctx, &props, v, level, meta, &stats); err != nil {
     331            0 :                                         return
     332            0 :                                 }
     333              :                         }
     334            1 :                         if r.Attributes.Intersects(sstable.AttributeRangeDels | sstable.AttributeRangeKeyDels) {
     335            1 :                                 compactionHints, err = d.loadTableRangeDelStats(ctx, r, v, level, meta, &stats, env)
     336            1 :                                 if err != nil {
     337            0 :                                         return
     338            0 :                                 }
     339              :                         }
     340            1 :                         return
     341              :                 })
     342            1 :         if err != nil {
     343            0 :                 return stats, nil, err
     344            0 :         }
     345            1 :         return stats, compactionHints, nil
     346              : }
     347              : 
     348              : // loadTablePointKeyStats calculates the point key statistics for the given
     349              : // table. The provided manifest.TableStats are updated.
     350              : func (d *DB) loadTablePointKeyStats(
     351              :         ctx context.Context,
     352              :         props *sstable.CommonProperties,
     353              :         v *manifest.Version,
     354              :         level int,
     355              :         meta *manifest.TableMetadata,
     356              :         stats *manifest.TableStats,
     357            1 : ) error {
     358            1 :         // TODO(jackson): If the file has a wide keyspace, the average
     359            1 :         // value size beneath the entire file might not be representative
     360            1 :         // of the size of the keys beneath the point tombstones.
     361            1 :         // We could write the ranges of 'clusters' of point tombstones to
     362            1 :         // a sstable property and call averageValueSizeBeneath for each of
     363            1 :         // these narrower ranges to improve the estimate.
     364            1 :         avgValLogicalSize, compressionRatio, err := d.estimateSizesBeneath(ctx, v, level, meta, props)
     365            1 :         if err != nil {
     366            0 :                 return err
     367            0 :         }
     368            1 :         stats.PointDeletionsBytesEstimate =
     369            1 :                 pointDeletionsBytesEstimate(props, avgValLogicalSize, compressionRatio)
     370            1 :         return nil
     371              : }
     372              : 
     373              : // loadTableRangeDelStats calculates the range deletion and range key deletion
     374              : // statistics for the given table.
     375              : func (d *DB) loadTableRangeDelStats(
     376              :         ctx context.Context,
     377              :         r *sstable.Reader,
     378              :         v *manifest.Version,
     379              :         level int,
     380              :         meta *manifest.TableMetadata,
     381              :         stats *manifest.TableStats,
     382              :         env sstable.ReadEnv,
     383            1 : ) ([]deleteCompactionHint, error) {
     384            1 :         iter, err := newCombinedDeletionKeyspanIter(ctx, d.opts.Comparer, r, meta, env)
     385            1 :         if err != nil {
     386            0 :                 return nil, err
     387            0 :         }
     388            1 :         defer iter.Close()
     389            1 :         var compactionHints []deleteCompactionHint
     390            1 :         // We iterate over the defragmented range tombstones and range key deletions,
     391            1 :         // which ensures we don't double count ranges deleted at different sequence
     392            1 :         // numbers. Also, merging abutting tombstones reduces the number of calls to
     393            1 :         // estimateReclaimedSizeBeneath which is costly, and improves the accuracy of
     394            1 :         // our overall estimate.
     395            1 :         s, err := iter.First()
     396            1 :         for ; s != nil; s, err = iter.Next() {
     397            1 :                 start, end := s.Start, s.End
     398            1 :                 // We only need to consider deletion size estimates for tables that contain
     399            1 :                 // RANGEDELs.
     400            1 :                 var maxRangeDeleteSeqNum base.SeqNum
     401            1 :                 for _, k := range s.Keys {
     402            1 :                         if k.Kind() == base.InternalKeyKindRangeDelete && maxRangeDeleteSeqNum < k.SeqNum() {
     403            1 :                                 maxRangeDeleteSeqNum = k.SeqNum()
     404            1 :                                 break
     405              :                         }
     406              :                 }
     407              : 
     408              :                 // If the file is in the last level of the LSM, there is no data beneath
     409              :                 // it. The fact that there is still a range tombstone in a bottommost file
      410              :                 // indicates two possibilities:
     411              :                 //   1. an open snapshot kept the tombstone around, and the data the
     412              :                 //      tombstone deletes is contained within the file itself.
     413              :                 //   2. the file was ingested.
     414              :                 // In the first case, we'd like to estimate disk usage within the file
     415              :                 // itself since compacting the file will drop that covered data. In the
     416              :                 // second case, we expect that compacting the file will NOT drop any
     417              :                 // data and rewriting the file is a waste of write bandwidth. We can
     418              :                 // distinguish these cases by looking at the table metadata's sequence
     419              :                 // numbers. A file's range deletions can only delete data within the
     420              :                 // file at lower sequence numbers. All keys in an ingested sstable adopt
     421              :                 // the same sequence number, preventing tombstones from deleting keys
     422              :                 // within the same file. We check here if the largest RANGEDEL sequence
     423              :                 // number is greater than the file's smallest sequence number. If it is,
     424              :                 // the RANGEDEL could conceivably (although inconclusively) delete data
     425              :                 // within the same file.
     426              :                 //
     427              :                 // Note that this heuristic is imperfect. If a table containing a range
     428              :                 // deletion is ingested into L5 and subsequently compacted into L6 but
     429              :                 // an open snapshot prevents elision of covered keys in L6, the
     430              :                 // resulting RangeDeletionsBytesEstimate will incorrectly include all
     431              :                 // covered keys.
     432              :                 //
     433              :                 // TODO(jackson): We could prevent the above error in the heuristic by
     434              :                 // computing the file's RangeDeletionsBytesEstimate during the
     435              :                 // compaction itself. It's unclear how common this is.
     436              :                 //
     437              :                 // NOTE: If the span `s` wholly contains a table containing range keys,
     438              :                 // the returned size estimate will be slightly inflated by the range key
     439              :                 // block. However, in practice, range keys are expected to be rare, and
     440              :                 // the size of the range key block relative to the overall size of the
     441              :                 // table is expected to be small.
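                       :                 //
                       :                 // For example, an sstable ingested directly into L6 assigns
                       :                 // every key, including any RANGEDEL, the same sequence number,
                       :                 // so meta.SmallestSeqNum == maxRangeDeleteSeqNum and the in-file
                       :                 // estimate below is correctly skipped: the tombstone cannot
                       :                 // delete keys within its own file.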
     442            1 :                 if level == numLevels-1 && meta.SmallestSeqNum < maxRangeDeleteSeqNum {
     443            1 :                         size, err := r.EstimateDiskUsage(start, end, env)
     444            1 :                         if err != nil {
     445            0 :                                 return nil, err
     446            0 :                         }
     447            1 :                         stats.RangeDeletionsBytesEstimate += size
     448            1 : 
     449            1 :                         // As the file is in the bottommost level, there is no need to collect a
     450            1 :                         // deletion hint.
     451            1 :                         continue
     452              :                 }
     453              : 
     454              :                 // While the size estimates for point keys should only be updated if this
     455              :                 // span contains a range del, the sequence numbers are required for the
     456              :                 // hint. Unconditionally descend, but conditionally update the estimates.
     457            1 :                 hintType := compactionHintFromKeys(s.Keys)
     458            1 :                 estimate, hintSeqNum, err := d.estimateReclaimedSizeBeneath(ctx, v, level, start, end, hintType)
     459            1 :                 if err != nil {
     460            0 :                         return nil, err
     461            0 :                 }
     462            1 :                 stats.RangeDeletionsBytesEstimate += estimate
     463            1 : 
     464            1 :                 // hintSeqNum is the smallest sequence number contained in any
     465            1 :                 // file overlapping with the hint and in a level below it.
     466            1 :                 if hintSeqNum == math.MaxUint64 {
     467            1 :                         continue
     468              :                 }
     469            1 :                 compactionHints = append(compactionHints, deleteCompactionHint{
     470            1 :                         hintType:                hintType,
     471            1 :                         start:                   slices.Clone(start),
     472            1 :                         end:                     slices.Clone(end),
     473            1 :                         tombstoneFile:           meta,
     474            1 :                         tombstoneLevel:          level,
     475            1 :                         tombstoneLargestSeqNum:  s.LargestSeqNum(),
     476            1 :                         tombstoneSmallestSeqNum: s.SmallestSeqNum(),
     477            1 :                         fileSmallestSeqNum:      hintSeqNum,
     478            1 :                 })
     479              :         }
     480            1 :         if err != nil {
     481            0 :                 return nil, err
     482            0 :         }
     483            1 :         return compactionHints, nil
     484              : }
     485              : 
     486              : func (d *DB) estimateSizesBeneath(
     487              :         ctx context.Context,
     488              :         v *manifest.Version,
     489              :         level int,
     490              :         meta *manifest.TableMetadata,
     491              :         fileProps *sstable.CommonProperties,
     492            1 : ) (avgValueLogicalSize, compressionRatio float64, err error) {
     493            1 :         // Find all files in lower levels that overlap with meta,
     494            1 :         // summing their value sizes and entry counts.
     495            1 : 
     496            1 :         // Include the file itself. This is important because in some instances, the
     497            1 :         // computed compression ratio is applied to the tombstones contained within
     498            1 :         // `meta` itself. If there are no files beneath `meta` in the LSM, we would
     499            1 :         // calculate a compression ratio of 0 which is not accurate for the file's
     500            1 :         // own tombstones.
     501            1 :         var (
     502            1 :                 // TODO(sumeer): The entryCount includes the tombstones, which can be small,
     503            1 :                 // resulting in a lower than expected avgValueLogicalSize. For an example of
     504            1 :                 // this effect see the estimate in testdata/compaction_picker_scores (search
     505            1 :                 // for "point-deletions-bytes-estimate: 163850").
     506            1 :                 fileSum    = meta.Size + meta.EstimatedReferenceSize()
     507            1 :                 entryCount = fileProps.NumEntries
     508            1 :                 keySum     = fileProps.RawKeySize
     509            1 :                 valSum     = fileProps.RawValueSize
     510            1 :         )
     511            1 : 
     512            1 :         for l := level + 1; l < numLevels; l++ {
     513            1 :                 for tableBeneath := range v.Overlaps(l, meta.UserKeyBounds()).All() {
     514            1 :                         fileSum += tableBeneath.Size + tableBeneath.EstimatedReferenceSize()
     515            1 :                         if tableBeneath.StatsValid() {
     516            1 :                                 entryCount += tableBeneath.Stats.NumEntries
     517            1 :                                 keySum += tableBeneath.Stats.RawKeySize
     518            1 :                                 valSum += tableBeneath.Stats.RawValueSize
     519            1 :                                 continue
     520              :                         }
     521              :                         // If stats aren't available, we need to read the properties block.
     522            1 :                         err := d.fileCache.withReader(ctx, block.NoReadEnv, tableBeneath, func(v *sstable.Reader, _ sstable.ReadEnv) (err error) {
     523            1 :                                 loadedProps, err := v.ReadPropertiesBlock(ctx, nil /* buffer pool */)
     524            1 :                                 if err != nil {
     525            0 :                                         return err
     526            0 :                                 }
     527            1 :                                 props := loadedProps.CommonProperties
     528            1 :                                 if tableBeneath.Virtual {
     529            1 :                                         props = loadedProps.GetScaledProperties(tableBeneath.TableBacking.Size, tableBeneath.Size)
     530            1 :                                 }
     531              : 
     532            1 :                                 entryCount += props.NumEntries
     533            1 :                                 keySum += props.RawKeySize
     534            1 :                                 valSum += props.RawValueSize
     535            1 :                                 return nil
     536              :                         })
     537            1 :                         if err != nil {
     538            0 :                                 return 0, 0, err
     539            0 :                         }
     540              :                 }
     541              :         }
     542            1 :         if entryCount == 0 {
     543            0 :                 return 0, 0, nil
     544            0 :         }
     545              :         // RawKeySize and RawValueSize are uncompressed totals. We'll need to scale
     546              :         // the value sum according to the data size to account for compression,
     547              :         // index blocks and metadata overhead. Eg:
     548              :         //
     549              :         //    Compression rate        ×  Average uncompressed value size
     550              :         //
     551              :         //                            ↓
     552              :         //
     553              :         //         FileSize              RawValueSize
     554              :         //   -----------------------  ×  ------------
     555              :         //   RawKeySize+RawValueSize     NumEntries
     556              :         //
     557              :         // We return the average logical value size plus the compression ratio,
     558              :         // leaving the scaling to the caller. This allows the caller to perform
     559              :         // additional compression ratio scaling if necessary.
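                       :         //
                       :         // Worked example (illustrative numbers): keySum = 20 MiB, valSum =
                       :         // 80 MiB and fileSum = 40 MiB yield a compression ratio of
                       :         // 40/(20+80) = 0.4. With entryCount = 1,000,000, avgValueLogicalSize
                       :         // is ~84 bytes, so eliding one average value is expected to reclaim
                       :         // roughly 84 * 0.4 = ~34 physical bytes once the caller applies the
                       :         // ratio.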
     560            1 :         uncompressedSum := float64(keySum + valSum)
     561            1 :         compressionRatio = float64(fileSum) / uncompressedSum
     562            1 :         if compressionRatio > 1 {
     563            1 :                 // We can get huge compression ratios due to the fixed overhead of files
     564            1 :                 // containing a tiny amount of data. By setting this to 1, we are ignoring
     565            1 :                 // that overhead, but we accept that tradeoff since the total bytes in
     566            1 :                 // such overhead is not large.
     567            1 :                 compressionRatio = 1
     568            1 :         }
     569            1 :         avgValueLogicalSize = (float64(valSum) / float64(entryCount))
     570            1 :         return avgValueLogicalSize, compressionRatio, nil
     571              : }
     572              : 
     573              : func (d *DB) estimateReclaimedSizeBeneath(
     574              :         ctx context.Context,
     575              :         v *manifest.Version,
     576              :         level int,
     577              :         start, end []byte,
     578              :         hintType deleteCompactionHintType,
     579            1 : ) (estimate uint64, hintSeqNum base.SeqNum, err error) {
     580            1 :         // Find all files in lower levels that overlap with the deleted range
     581            1 :         // [start, end).
     582            1 :         //
     583            1 :         // An overlapping file might be completely contained by the range
     584            1 :         // tombstone, in which case we can count the entire file size in
     585            1 :         // our estimate without doing any additional I/O.
     586            1 :         //
     587            1 :         // Otherwise, estimating the range for the file requires
     588            1 :         // additional I/O to read the file's index blocks.
     589            1 :         hintSeqNum = math.MaxUint64
     590            1 :         // TODO(jbowens): When there are multiple sub-levels in L0 and the RANGEDEL
     591            1 :         // is from a higher sub-level, we incorrectly skip the files in the lower
     592            1 :         // sub-levels when estimating this overlap.
     593            1 :         for l := level + 1; l < numLevels; l++ {
     594            1 :                 for file := range v.Overlaps(l, base.UserKeyBoundsEndExclusive(start, end)).All() {
     595            1 :                         // Determine whether we need to update size estimates and hint seqnums
     596            1 :                         // based on the type of hint and the type of keys in this file.
     597            1 :                         var updateEstimates, updateHints bool
     598            1 :                         switch hintType {
     599            1 :                         case deleteCompactionHintTypePointKeyOnly:
     600            1 :                                 // The range deletion byte estimates should only be updated if this
     601            1 :                                 // table contains point keys. This ends up being an overestimate in
     602            1 :                                 // the case that table also has range keys, but such keys are expected
     603            1 :                                 // to contribute a negligible amount of the table's overall size,
     604            1 :                                 // relative to point keys.
     605            1 :                                 if file.HasPointKeys {
     606            1 :                                         updateEstimates = true
     607            1 :                                 }
     608              :                                 // As the initiating span contained only range dels, hints can only be
     609              :                                 // updated if this table does _not_ contain range keys.
     610            1 :                                 if !file.HasRangeKeys {
     611            1 :                                         updateHints = true
     612            1 :                                 }
     613            1 :                         case deleteCompactionHintTypeRangeKeyOnly:
     614            1 :                                 // The initiating span contained only range key dels. The estimates
     615            1 :                                 // apply only to point keys, and are therefore not updated.
     616            1 :                                 updateEstimates = false
     617            1 :                                 // As the initiating span contained only range key dels, hints can
     618            1 :                                 // only be updated if this table does _not_ contain point keys.
     619            1 :                                 if !file.HasPointKeys {
     620            1 :                                         updateHints = true
     621            1 :                                 }
     622            1 :                         case deleteCompactionHintTypePointAndRangeKey:
     623            1 :                                 // Always update the estimates and hints, as this hint type can drop a
     624            1 :                                 // file, irrespective of the mixture of keys. Similar to above, the
     625            1 :                                 // range del bytes estimates is an overestimate.
     626            1 :                                 updateEstimates, updateHints = true, true
     627            0 :                         default:
     628            0 :                                 panic(fmt.Sprintf("pebble: unknown hint type %s", hintType))
     629              :                         }
     630            1 :                         startCmp := d.cmp(start, file.Smallest().UserKey)
     631            1 :                         endCmp := d.cmp(file.Largest().UserKey, end)
     632            1 :                         if startCmp <= 0 && (endCmp < 0 || endCmp == 0 && file.Largest().IsExclusiveSentinel()) {
     633            1 :                                 // The range fully contains the file, so skip looking it up in table
     634            1 :                                 // cache/looking at its indexes and add the full file size.
     635            1 :                                 if updateEstimates {
     636            1 :                                         estimate += file.Size
     637            1 :                                 }
     638            1 :                                 if updateHints && hintSeqNum > file.SmallestSeqNum {
     639            1 :                                         hintSeqNum = file.SmallestSeqNum
     640            1 :                                 }
     641            1 :                         } else if d.cmp(file.Smallest().UserKey, end) <= 0 && d.cmp(start, file.Largest().UserKey) <= 0 {
     642            1 :                                 // Partial overlap.
     643            1 :                                 if hintType == deleteCompactionHintTypeRangeKeyOnly {
     644            1 :                                         // If the hint that generated this overlap contains only range keys,
     645            1 :                                         // there is no need to calculate disk usage, as the reclaimable space
     646            1 :                                         // is expected to be minimal relative to point keys.
     647            1 :                                         continue
     648              :                                 }
     649            1 :                                 var size uint64
     650            1 :                                 err := d.fileCache.withReader(ctx, block.NoReadEnv, file,
     651            1 :                                         func(r *sstable.Reader, env sstable.ReadEnv) (err error) {
     652            1 :                                                 size, err = r.EstimateDiskUsage(start, end, env)
     653            1 :                                                 return err
     654            1 :                                         })
     655            1 :                                 if err != nil {
     656            0 :                                         return 0, hintSeqNum, err
     657            0 :                                 }
     658            1 :                                 estimate += size
     659            1 :                                 if updateHints && hintSeqNum > file.SmallestSeqNum && d.FormatMajorVersion() >= FormatVirtualSSTables {
     660            1 :                                         // If the format major version is past Virtual SSTables, deletion only
     661            1 :                                         // hints can also apply to partial overlaps with sstables.
     662            1 :                                         hintSeqNum = file.SmallestSeqNum
     663            1 :                                 }
     664              :                         }
     665              :                 }
     666              :         }
     667            1 :         return estimate, hintSeqNum, nil
     668              : }
     669              : 
     670              : var lastSanityCheckStatsLog crtime.AtomicMono
     671              : 
     672            1 : func sanityCheckStats(meta *manifest.TableMetadata, logger Logger, info string) {
     673            1 :         // Values for PointDeletionsBytesEstimate and RangeDeletionsBytesEstimate that
     674            1 :         // exceed this value are likely indicative of a bug (eg, underflow).
     675            1 :         const maxDeletionBytesEstimate = 1 << 50 // 1 PiB
     676            1 : 
     677            1 :         if meta.Stats.PointDeletionsBytesEstimate > maxDeletionBytesEstimate ||
     678            1 :                 meta.Stats.RangeDeletionsBytesEstimate > maxDeletionBytesEstimate {
     679            0 :                 if invariants.Enabled {
     680            0 :                         panic(fmt.Sprintf("%s: table %s has extreme deletion bytes estimates: point=%d range=%d",
     681            0 :                                 info, meta.TableNum,
     682            0 :                                 redact.Safe(meta.Stats.PointDeletionsBytesEstimate),
     683            0 :                                 redact.Safe(meta.Stats.RangeDeletionsBytesEstimate),
     684            0 :                         ))
     685              :                 }
     686            0 :                 if v := lastSanityCheckStatsLog.Load(); v == 0 || v.Elapsed() > 30*time.Second {
     687            0 :                         logger.Errorf("%s: table %s has extreme deletion bytes estimates: point=%d range=%d",
     688            0 :                                 info, meta.TableNum,
     689            0 :                                 redact.Safe(meta.Stats.PointDeletionsBytesEstimate),
     690            0 :                                 redact.Safe(meta.Stats.RangeDeletionsBytesEstimate),
     691            0 :                         )
     692            0 :                         lastSanityCheckStatsLog.Store(crtime.NowMono())
     693            0 :                 }
     694              :         }
     695              : }
     696              : 
     697              : func maybeSetStatsFromProperties(
     698              :         meta *manifest.TableMetadata, props *sstable.CommonProperties, logger Logger,
     699            1 : ) bool {
     700            1 :         // If a table contains range deletions or range key deletions, we defer the
     701            1 :         // stats collection. There are two main reasons for this:
     702            1 :         //
     703            1 :         //  1. Estimating the potential for reclaimed space due to a range deletion
     704            1 :         //     tombstone requires scanning the LSM - a potentially expensive operation
     705            1 :         //     that should be deferred.
     706            1 :         //  2. Range deletions and / or range key deletions present an opportunity to
     707            1 :         //     compute "deletion hints", which also requires a scan of the LSM to
     708            1 :         //     compute tables that would be eligible for deletion.
     709            1 :         //
     710            1 :         // These two tasks are deferred to the table stats collector goroutine.
     711            1 :         if props.NumRangeDeletions != 0 || props.NumRangeKeyDels != 0 {
     712            1 :                 return false
     713            1 :         }
     714              : 
     715              :         // If a table is more than 10% point deletions without user-provided size
     716              :         // estimates, don't calculate the PointDeletionsBytesEstimate statistic
     717              :         // using our limited knowledge. The table stats collector can populate the
     718              :         // stats and calculate an average of value size of all the tables beneath
     719              :         // the table in the LSM, which will be more accurate.
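                       :         //
                       :         // For example, a table with NumEntries = 1000 and 150 unsized DELs
                       :         // (15%) is deferred to the stats collector, while one with 50 unsized
                       :         // DELs (5%) is estimated below from its own properties.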
     720            1 :         if unsizedDels := (props.NumDeletions - props.NumSizedDeletions); unsizedDels > props.NumEntries/10 {
     721            1 :                 return false
     722            1 :         }
     723              : 
     724            1 :         var pointEstimate uint64
     725            1 :         if props.NumEntries > 0 {
     726            1 :                 // Use the file's own average key and value sizes as an estimate. This
     727            1 :                 // doesn't require any additional IO and since the number of point
     728            1 :                 // deletions in the file is low, the error introduced by this crude
     729            1 :                 // estimate is expected to be small.
     730            1 :                 avgValSize, compressionRatio := estimatePhysicalSizes(meta, props)
     731            1 :                 pointEstimate = pointDeletionsBytesEstimate(props, avgValSize, compressionRatio)
     732            1 :         }
     733              : 
     734            1 :         meta.Stats.NumEntries = props.NumEntries
     735            1 :         meta.Stats.NumDeletions = props.NumDeletions
     736            1 :         meta.Stats.NumRangeKeySets = props.NumRangeKeySets
     737            1 :         meta.Stats.PointDeletionsBytesEstimate = pointEstimate
     738            1 :         meta.Stats.RangeDeletionsBytesEstimate = 0
     739            1 :         meta.Stats.ValueBlocksSize = props.ValueBlocksSize
     740            1 :         meta.Stats.RawKeySize = props.RawKeySize
     741            1 :         meta.Stats.RawValueSize = props.RawValueSize
     742            1 :         meta.Stats.CompressionType = block.CompressionProfileByName(props.CompressionName)
     743            1 :         meta.StatsMarkValid()
     744            1 :         sanityCheckStats(meta, logger, "stats from properties")
     745            1 :         return true
     746              : }
     747              : 
     748              : func pointDeletionsBytesEstimate(
     749              :         props *sstable.CommonProperties, avgValLogicalSize, compressionRatio float64,
     750            1 : ) (estimate uint64) {
     751            1 :         if props.NumEntries == 0 {
     752            0 :                 return 0
     753            0 :         }
     754            1 :         numPointDels := props.NumPointDeletions()
     755            1 :         if numPointDels == 0 {
     756            1 :                 return 0
     757            1 :         }
     758              :         // Estimate the potential space to reclaim using the table's own properties.
     759              :         // There may or may not be keys covered by any individual point tombstone.
     760              :         // If not, compacting the point tombstone into L6 will at least allow us to
     761              :         // drop the point deletion key and will reclaim the tombstone's key bytes.
     762              :         // If there are covered key(s), we also get to drop key and value bytes for
     763              :         // each covered key.
     764              :         //
     765              :         // Some point tombstones (DELSIZEDs) carry a user-provided estimate of the
     766              :         // uncompressed size of entries that will be elided by fully compacting the
     767              :         // tombstone. For these tombstones, there's no guesswork—we use the
     768              :         // RawPointTombstoneValueSizeHint property which is the sum of all these
     769              :         // tombstones' encoded values.
     770              :         //
     771              :         // For unsized point tombstones (DELs), we estimate assuming that each
     772              :         // point tombstone on average covers 1 key and using average value sizes.
     773              :         // This is almost certainly an overestimate, but that's probably okay
     774              :         // because point tombstones can slow range iterations even when they don't
     775              :         // cover a key.
     776              :         //
     777              :         // TODO(jackson): This logic doesn't directly incorporate fixed per-key
     778              :         // overhead (8-byte trailer, plus at least 1 byte encoding the length of the
     779              :         // key and 1 byte encoding the length of the value). This overhead is
     780              :         // indirectly incorporated through the compression ratios, but that results
     781              :         // in the overhead being smeared per key-byte and value-byte, rather than
     782              :         // per-entry. This per-key fixed overhead can be nontrivial, especially for
     783              :         // dense swaths of point tombstones. Give some thought as to whether we
     784              :         // should directly include fixed per-key overhead in the calculations.
     785              : 
     786              :         // Below, we calculate the tombstone contributions and the shadowed keys'
     787              :         // contributions separately.
     788            1 :         var tombstonesLogicalSize float64
     789            1 :         var shadowedLogicalSize float64
     790            1 : 
     791            1 :         // 1. Calculate the contribution of the tombstone keys themselves.
     792            1 :         if props.RawPointTombstoneKeySize > 0 {
     793            1 :                 tombstonesLogicalSize += float64(props.RawPointTombstoneKeySize)
     794            1 :         } else {
     795            0 :                 // This sstable predates the existence of the RawPointTombstoneKeySize
     796            0 :                 // property. We can use the average key size within the file itself and
     797            0 :                 // the count of point deletions to estimate the size.
     798            0 :                 tombstonesLogicalSize += float64(numPointDels * props.RawKeySize / props.NumEntries)
     799            0 :         }
     800              : 
     801              :         // 2. Calculate the contribution of the keys shadowed by tombstones.
     802              :         //
     803              :         // 2a. First account for keys shadowed by DELSIZED tombstones. The DELSIZED
     804              :         // tombstones encode the size of both the key and value of the shadowed KV
     805              :         // entries. These sizes are aggregated into a sstable property.
     806            1 :         shadowedLogicalSize += float64(props.RawPointTombstoneValueSize)
     807            1 : 
     808            1 :         // 2b. Calculate the contribution of the KV entries shadowed by ordinary DEL
     809            1 :         // keys.
     810            1 :         numUnsizedDels := invariants.SafeSub(numPointDels, props.NumSizedDeletions)
     811            1 :         {
     812            1 :                 // The shadowed keys have the same exact user keys as the tombstones
     813            1 :                 // themselves, so we can use the `tombstonesLogicalSize` we computed
     814            1 :                 // earlier as an estimate. One complication: `tombstonesLogicalSize`
     815            1 :                 // includes the DELSIZED keys we already accounted for, so we scale
     816            1 :                 // by numUnsizedDels/numPointDels to count only the unsized tombstones.
     817            1 :                 shadowedLogicalSize += tombstonesLogicalSize / float64(numPointDels) * float64(numUnsizedDels)
     818            1 : 
     819            1 :                 // Calculate the contribution of the deleted values. The caller has
     820            1 :                 // already computed an average logical size (possibly computed across
     821            1 :                 // many sstables).
     822            1 :                 shadowedLogicalSize += float64(numUnsizedDels) * avgValLogicalSize
     823            1 :         }
     824              : 
     825              :         // Scale both tombstone and shadowed totals by the physical:logical size
     826              :         // ratio to account for compression, metadata overhead, etc.
     827              :         //
     828              :         //      Physical             FileSize
     829              :         //     -----------  = -----------------------
     830              :         //      Logical       RawKeySize+RawValueSize
     831              :         //
     832            1 :         return uint64((tombstonesLogicalSize + shadowedLogicalSize) * compressionRatio)
     833              : }
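
To make the arithmetic concrete, the following self-contained snippet walks the same formula with invented numbers (all values are made up for this sketch; the identifiers mirror, but are not, the real table properties):

```go
package main

import "fmt"

func main() {
	const (
		numPointDels     = 100.0  // total point tombstones
		numSizedDels     = 40.0   // DELSIZEDs with user-provided size hints
		tombstoneKeySize = 1600.0 // stands in for RawPointTombstoneKeySize
		sizedValueHint   = 8000.0 // stands in for RawPointTombstoneValueSize
		avgValSize       = 64.0   // caller-provided average logical value size
		compressionRatio = 0.5    // physical:logical ratio, already clamped to <= 1
	)
	tombstones := tombstoneKeySize            // 1. the tombstone keys themselves
	shadowed := sizedValueHint                // 2a. KVs shadowed by DELSIZEDs
	numUnsized := numPointDels - numSizedDels // ordinary DELs
	// 2b. KVs shadowed by ordinary DELs: same user keys as the tombstones,
	// scaled to the unsized fraction, plus average-sized values.
	shadowed += tombstones / numPointDels * numUnsized
	shadowed += numUnsized * avgValSize
	fmt.Printf("estimate = %.0f bytes\n", (tombstones+shadowed)*compressionRatio)
	// Output: estimate = 7200 bytes
}
```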
     834              : 
     835              : func estimatePhysicalSizes(
     836              :         tableMeta *manifest.TableMetadata, props *sstable.CommonProperties,
     837            1 : ) (avgValLogicalSize, compressionRatio float64) {
     838            1 :         // RawKeySize and RawValueSize are uncompressed totals. Scale according to
     839            1 :         // the data size to account for compression, index blocks and metadata
     840            1 :         // overhead. E.g.:
     841            1 :         //
     842            1 :         //    Compression rate        ×  Average uncompressed value size
     843            1 :         //
     844            1 :         //                            ↓
     845            1 :         //
     846            1 :         //         FileSize              RawValSize
     847            1 :         //   -----------------------  ×  ----------
     848            1 :         //   RawKeySize+RawValueSize     NumEntries
     849            1 :         //
     850            1 :         physicalSize := tableMeta.Size + tableMeta.EstimatedReferenceSize()
     851            1 :         uncompressedSum := props.RawKeySize + props.RawValueSize
     852            1 :         compressionRatio = float64(physicalSize) / float64(uncompressedSum)
     853            1 :         if compressionRatio > 1 {
     854            1 :                 // We can get huge compression ratios due to the fixed overhead of files
     855            1 :                 // containing a tiny amount of data. By setting this to 1, we are ignoring
     856            1 :                 // that overhead, but we accept that tradeoff since the total bytes in
     857            1 :                 // such overhead is not large.
     858            1 :                 compressionRatio = 1
     859            1 :         }
     860            1 :         avgValLogicalSize = float64(props.RawValueSize) / float64(props.NumEntries)
     861            1 :         return avgValLogicalSize, compressionRatio
     862              : }
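
A quick numeric illustration of the ratio computation and its clamp, using toy values chosen for this sketch:

```go
package main

import "fmt"

func main() {
	// Toy numbers: a 10 MiB physical file over 25 MiB of raw key+value bytes.
	physical, logical := float64(10<<20), float64(25<<20)
	ratio := physical / logical // 0.4: the file compresses to 40% of raw size
	if ratio > 1 {
		// A tiny file dominated by fixed per-file overhead would land here
		// and be clamped, exactly as in estimatePhysicalSizes above.
		ratio = 1
	}
	fmt.Printf("compression ratio: %.2f\n", ratio)
}
```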
     863              : 
     864              : // newCombinedDeletionKeyspanIter returns a keyspan.FragmentIterator that
     865              : // returns "ranged deletion" spans for a single table, providing a combined view
     866              : // of both range deletion and range key deletion spans. The
     867              : // tableRangedDeletionIter is intended for use in the specific case of computing
     868              : // the statistics and deleteCompactionHints for a single table.
     869              : //
     870              : // As an example, consider the following set of spans from the range deletion
     871              : // and range key blocks of a table:
     872              : //
     873              : //                    |---------|     |---------|         |-------| RANGEKEYDELs
     874              : //              |-----------|-------------|           |-----|       RANGEDELs
     875              : //        __________________________________________________________
     876              : //              a b c d e f g h i j k l m n o p q r s t u v w x y z
     877              : //
     878              : // The tableRangedDeletionIter produces the following set of output spans, where
     879              : // '1' indicates a span containing only range deletions, '2' is a span
     880              : // containing only range key deletions, and '3' is a span containing a mixture
     881              : // of both range deletions and range key deletions.
     882              : //
     883              : //                 1       3       1    3    2          1  3   2
     884              : //              |-----|---------|-----|---|-----|     |---|-|-----|
     885              : //        __________________________________________________________
     886              : //              a b c d e f g h i j k l m n o p q r s t u v w x y z
     887              : //
     888              : // Algorithm.
     889              : //
     890              : // The iterator first defragments the range deletion and range key blocks
     891              : // separately. During this defragmentation, the range key block is also filtered
     892              : // so that keys other than range key deletes are ignored. The range delete and
     893              : // range key delete keyspaces are then merged.
     894              : //
     895              : // Note that the only fragmentation introduced by merging is from where a range
     896              : // del span overlaps with a range key del span. Within the bounds of any overlap
     897              : // there is guaranteed to be no further fragmentation, as the constituent spans
     898              : // have already been defragmented. To the left and right of any overlap, the
     899              : // same reasoning applies. For example,
     900              : //
     901              : //                       |--------|         |-------| RANGEKEYDEL
     902              : //              |---------------------------|         RANGEDEL
     903              : //              |----1---|----3---|----1----|---2---| Merged, fragmented spans.
     904              : //        __________________________________________________________
     905              : //              a b c d e f g h i j k l m n o p q r s t u v w x y z
     906              : //
     907              : // Any fragmented abutting spans produced by the merging iter will be of
     908              : // differing types (i.e. a transition from a span with homogeneous key kinds to a
     909              : // heterogeneous span, or a transition from a span with exclusively range dels
     910              : // to a span with exclusively range key dels). Therefore, further
     911              : // defragmentation is not required.
     912              : //
     913              : // Each span returned by the tableRangedDeletionIter will have at most four keys,
     914              : // corresponding to the largest and smallest sequence numbers encountered across
     915              : // the range deletes and range key deletes that comprise the merged spans.
     916              : func newCombinedDeletionKeyspanIter(
     917              :         ctx context.Context,
     918              :         comparer *base.Comparer,
     919              :         r *sstable.Reader,
     920              :         m *manifest.TableMetadata,
     921              :         env sstable.ReadEnv,
     922            1 : ) (keyspan.FragmentIterator, error) {
     923            1 :         // The range del iter and range key iter are each wrapped in their own
     924            1 :         // defragmenting iter. For each iter, abutting spans can always be merged.
     925            1 :         var equal = keyspan.DefragmentMethodFunc(func(_ base.CompareRangeSuffixes, a, b *keyspan.Span) bool { return true })
     926              :         // Reduce keys by maintaining a slice of at most length two, corresponding to
     927              :         // the largest and smallest keys in the defragmented span. This maintains the
     928              :         // contract that the emitted slice is sorted by (SeqNum, Kind) descending.
     929            1 :         reducer := func(current, incoming []keyspan.Key) []keyspan.Key {
     930            1 :                 if len(current) == 0 && len(incoming) == 0 {
     931            0 :                         // While this should never occur in practice, a defensive return is used
     932            0 :                         // here to preserve correctness.
     933            0 :                         return current
     934            0 :                 }
     935            1 :                 var largest, smallest keyspan.Key
     936            1 :                 var set bool
     937            1 :                 for _, keys := range [2][]keyspan.Key{current, incoming} {
     938            1 :                         if len(keys) == 0 {
     939            0 :                                 continue
     940              :                         }
     941            1 :                         first, last := keys[0], keys[len(keys)-1]
     942            1 :                         if !set {
     943            1 :                                 largest, smallest = first, last
     944            1 :                                 set = true
     945            1 :                                 continue
     946              :                         }
     947            1 :                         if first.Trailer > largest.Trailer {
     948            1 :                                 largest = first
     949            1 :                         }
     950            1 :                         if last.Trailer < smallest.Trailer {
     951            1 :                                 smallest = last
     952            1 :                         }
     953              :                 }
     954            1 :                 if largest.Equal(comparer.CompareRangeSuffixes, smallest) {
     955            1 :                         current = append(current[:0], largest)
     956            1 :                 } else {
     957            1 :                         current = append(current[:0], largest, smallest)
     958            1 :                 }
     959            1 :                 return current
     960              :         }
     961              : 
     962              :         // The separate iters for the range dels and range keys are wrapped in a
     963              :         // merging iter to join the keyspaces into a single keyspace. The separate
     964              :         // iters are only added if the particular key kind is present.
     965            1 :         mIter := &keyspanimpl.MergingIter{}
     966            1 :         var transform = keyspan.TransformerFunc(func(_ base.CompareRangeSuffixes, in keyspan.Span, out *keyspan.Span) error {
     967            1 :                 if in.KeysOrder != keyspan.ByTrailerDesc {
     968            0 :                         return base.AssertionFailedf("combined deletion iter encountered keys in non-trailer descending order")
     969            0 :                 }
     970            1 :                 out.Start, out.End = in.Start, in.End
     971            1 :                 out.Keys = append(out.Keys[:0], in.Keys...)
     972            1 :                 out.KeysOrder = keyspan.ByTrailerDesc
     973            1 :                 // NB: The order of by-trailer descending may have been violated,
     974            1 :                 // because we've layered rangekey and rangedel iterators from the same
     975            1 :                 // sstable into the same keyspanimpl.MergingIter. The MergingIter will
     976            1 :                 // return the keys in the order that the child iterators were provided.
     977            1 :                 // Sort the keys to ensure they're sorted by trailer descending.
     978            1 :                 keyspan.SortKeysByTrailer(out.Keys)
     979            1 :                 return nil
     980              :         })
     981            1 :         mIter.Init(comparer, transform, new(keyspanimpl.MergingBuffers))
     982            1 :         iter, err := r.NewRawRangeDelIter(ctx, m.FragmentIterTransforms(), env)
     983            1 :         if err != nil {
     984            0 :                 return nil, err
     985            0 :         }
     986            1 :         if iter != nil {
     987            1 :                 // Assert expected bounds. In previous versions of Pebble, range
     988            1 :                 // deletions persisted to sstables could exceed the bounds of the
     989            1 :                 // containing files due to "split user keys." This required readers to
     990            1 :                 // constrain the tombstones' bounds to the containing file at read time.
     991            1 :                 // See docs/range_deletions.md for an extended discussion of the design
     992            1 :                 // and invariants at that time.
     993            1 :                 //
     994            1 :                 // We've since compacted away all 'split user-keys' and in the process
     995            1 :                 // eliminated all "untruncated range tombstones" for physical sstables.
     996            1 :                 // We no longer need to perform truncation at read time for these
     997            1 :                 // sstables.
     998            1 :                 //
     999            1 :                 // At the same time, we've also introduced the concept of "virtual
    1000            1 :                 // SSTables" where the table metadata's effective bounds can again be
    1001            1 :                 // reduced to be narrower than the contained tombstones. These virtual
    1002            1 :                 // SSTables handle truncation differently, performing it using
    1003            1 :                 // keyspan.Truncate when the sstable's range deletion iterator is
    1004            1 :                 // opened.
    1005            1 :                 //
    1006            1 :                 // Together, these mean that we should never see untruncated range
    1007            1 :                 // tombstones any more—and the merging iterator no longer accounts for
    1008            1 :                 // their existence. Since there's abundant subtlety that we're relying
    1009            1 :                 // on, we choose to be conservative and assert that these invariants
    1010            1 :                 // hold. We could (and previously did) choose to only validate these
    1011            1 :                 // bounds in invariants builds, but the most likely avenue for these
    1012            1 :                 // tombstones' existence is through a bug in a migration and old data
    1013            1 :                 // sitting around in an old store from long ago.
    1014            1 :                 //
    1015            1 :                 // The table stats collector will read all files' range deletions
    1016            1 :                 // asynchronously after Open, and provides a perfect opportunity to
    1017            1 :                 // validate our invariants without harming user latency. We also
    1018            1 :                 // previously performed truncation here which similarly required key
    1019            1 :                 // comparisons, so replacing those key comparisons with assertions
    1020            1 :                 // should be roughly similar in performance.
    1021            1 :                 //
    1022            1 :                 // TODO(jackson): Only use AssertBounds in invariants builds in the
    1023            1 :                 // following release.
    1024            1 :                 iter = keyspan.AssertBounds(
    1025            1 :                         iter, m.PointKeyBounds.Smallest(), m.PointKeyBounds.LargestUserKey(), comparer.Compare,
    1026            1 :                 )
    1027            1 :                 dIter := &keyspan.DefragmentingIter{}
    1028            1 :                 dIter.Init(comparer, iter, equal, reducer, new(keyspan.DefragmentingBuffers))
    1029            1 :                 iter = dIter
    1030            1 :                 mIter.AddLevel(iter)
    1031            1 :         }
    1032              : 
    1033            1 :         iter, err = r.NewRawRangeKeyIter(ctx, m.FragmentIterTransforms(), env)
    1034            1 :         if err != nil {
    1035            0 :                 return nil, err
    1036            0 :         }
    1037            1 :         if iter != nil {
    1038            1 :                 // Assert expected bounds in tests.
    1039            1 :                 if invariants.Sometimes(50) {
    1040            1 :                         if m.HasRangeKeys {
    1041            1 :                                 iter = keyspan.AssertBounds(
    1042            1 :                                         iter, m.RangeKeyBounds.Smallest(), m.RangeKeyBounds.LargestUserKey(), comparer.Compare,
    1043            1 :                                 )
    1044            1 :                         }
    1045              :                 }
    1046              :                 // Wrap the range key iterator in a filter that elides keys other than range
    1047              :                 // key deletions.
    1048            1 :                 iter = keyspan.Filter(iter, func(in *keyspan.Span, buf []keyspan.Key) []keyspan.Key {
    1049            1 :                         keys := buf[:0]
    1050            1 :                         for _, k := range in.Keys {
    1051            1 :                                 if k.Kind() != base.InternalKeyKindRangeKeyDelete {
    1052            1 :                                         continue
    1053              :                                 }
    1054            1 :                                 keys = append(keys, k)
    1055              :                         }
    1056            1 :                         return keys
    1057              :                 }, comparer.Compare)
    1058            1 :                 dIter := &keyspan.DefragmentingIter{}
    1059            1 :                 dIter.Init(comparer, iter, equal, reducer, new(keyspan.DefragmentingBuffers))
    1060            1 :                 iter = dIter
    1061            1 :                 mIter.AddLevel(iter)
    1062              :         }
    1063              : 
    1064            1 :         return mIter, nil
    1065              : }
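
The reducer above leans on its inputs already being sorted by trailer descending, so it only inspects the first and last key of each slice. Below is a simplified, self-contained model of the same semantics (a toy `key` type, not the keyspan package) that scans every key instead:

```go
// key is a toy stand-in for keyspan.Key; only the trailer matters here.
type key struct{ trailer uint64 }

// reduce keeps only the keys with the largest and smallest trailers from the
// union of current and incoming, returned in descending trailer order.
func reduce(current, incoming []key) []key {
	all := append(append([]key(nil), current...), incoming...)
	if len(all) == 0 {
		return current
	}
	largest, smallest := all[0], all[0]
	for _, k := range all[1:] {
		if k.trailer > largest.trailer {
			largest = k
		}
		if k.trailer < smallest.trailer {
			smallest = k
		}
	}
	if largest == smallest {
		return []key{largest}
	}
	return []key{largest, smallest}
}
```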
    1066              : 
    1067              : // rangeKeySetsAnnotator is a manifest.Annotator that annotates B-Tree nodes
     1068              : // with the sum of the files' counts of range key sets. The count of range
    1069              : // key sets may change once a table's stats are loaded asynchronously, so its
    1070              : // values are marked as cacheable only if a file's stats have been loaded.
    1071            1 : var rangeKeySetsAnnotator = manifest.SumAnnotator(func(f *manifest.TableMetadata) (uint64, bool) {
    1072            1 :         return f.Stats.NumRangeKeySets, f.StatsValid()
    1073            1 : })
    1074              : 
    1075              : // tombstonesAnnotator is a manifest.Annotator that annotates B-Tree nodes
    1076              : // with the sum of the files' counts of tombstones (DEL, SINGLEDEL and RANGEDEL
    1077              : // keys). The count of tombstones may change once a table's stats are loaded
    1078              : // asynchronously, so its values are marked as cacheable only if a file's stats
    1079              : // have been loaded.
    1080            1 : var tombstonesAnnotator = manifest.SumAnnotator(func(f *manifest.TableMetadata) (uint64, bool) {
    1081            1 :         return f.Stats.NumDeletions, f.StatsValid()
    1082            1 : })
    1083              : 
     1084              : // valueBlockSizeAnnotator is a manifest.Annotator that annotates B-Tree
    1085              : // nodes with the sum of the files' Properties.ValueBlocksSize. The value block
    1086              : // size may change once a table's stats are loaded asynchronously, so its
    1087              : // values are marked as cacheable only if a file's stats have been loaded.
    1088            1 : var valueBlockSizeAnnotator = manifest.SumAnnotator(func(f *manifest.TableMetadata) (uint64, bool) {
    1089            1 :         return f.Stats.ValueBlocksSize, f.StatsValid()
    1090            1 : })
    1091              : 
    1092              : // pointDeletionsBytesEstimateAnnotator is a manifest.Annotator that annotates
    1093              : // B-Tree nodes with the sum of the files' PointDeletionsBytesEstimate. This
    1094              : // value may change once a table's stats are loaded asynchronously, so its
    1095              : // values are marked as cacheable only if a file's stats have been loaded.
    1096            1 : var pointDeletionsBytesEstimateAnnotator = manifest.SumAnnotator(func(f *manifest.TableMetadata) (uint64, bool) {
    1097            1 :         return f.Stats.PointDeletionsBytesEstimate, f.StatsValid()
    1098            1 : })
    1099              : 
    1100              : // rangeDeletionsBytesEstimateAnnotator is a manifest.Annotator that annotates
    1101              : // B-Tree nodes with the sum of the files' RangeDeletionsBytesEstimate. This
    1102              : // value may change once a table's stats are loaded asynchronously, so its
    1103              : // values are marked as cacheable only if a file's stats have been loaded.
    1104            1 : var rangeDeletionsBytesEstimateAnnotator = manifest.SumAnnotator(func(f *manifest.TableMetadata) (uint64, bool) {
    1105            1 :         return f.Stats.RangeDeletionsBytesEstimate, f.StatsValid()
    1106            1 : })
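
Each of these annotators follows the same contract: a per-file value paired with a cacheability bit, where a subtree's cached sum stays valid only if every file's stats were already loaded. A hedged, self-contained model of that contract (simplified types, not the manifest package):

```go
// fileStats is a toy stand-in for the per-file stats consulted above.
type fileStats struct {
	numDeletions uint64
	statsValid   bool
}

// sumTombstones mirrors what a SumAnnotator computes over one B-Tree node:
// the sum is cacheable only if every contributing file had loaded stats.
func sumTombstones(files []fileStats) (sum uint64, cacheable bool) {
	cacheable = true
	for _, f := range files {
		sum += f.numDeletions
		cacheable = cacheable && f.statsValid
	}
	return sum, cacheable
}
```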
    1107              : 
    1108              : // compressionTypeAnnotator is a manifest.Annotator that annotates B-tree
    1109              : // nodes with per-compression-type counts of files. Its annotation type is
    1110              : // compressionTypes. The compression type may change once a table's stats are
    1111              : // loaded asynchronously, so its values are marked as cacheable only if a file's
    1112              : // stats have been loaded.
    1113              : var compressionTypeAnnotator = manifest.Annotator[compressionTypes]{
    1114              :         Aggregator: compressionTypeAggregator{},
    1115              : }
    1116              : 
    1117              : type compressionTypeAggregator struct{}
    1118              : 
    1119              : type compressionTypes struct {
    1120              :         snappy, zstd, minlz, none, unknown uint64
    1121              : }
    1122              : 
    1123            1 : func (a compressionTypeAggregator) Zero(dst *compressionTypes) *compressionTypes {
    1124            1 :         if dst == nil {
    1125            1 :                 return new(compressionTypes)
    1126            1 :         }
    1127            0 :         *dst = compressionTypes{}
    1128            0 :         return dst
    1129              : }
    1130              : 
    1131              : func (a compressionTypeAggregator) Accumulate(
    1132              :         f *manifest.TableMetadata, dst *compressionTypes,
    1133            1 : ) (v *compressionTypes, cacheOK bool) {
    1134            1 :         switch f.Stats.CompressionType {
    1135            1 :         case sstable.SnappyCompression:
    1136            1 :                 dst.snappy++
    1137            1 :         case sstable.ZstdCompression:
    1138            1 :                 dst.zstd++
    1139            1 :         case sstable.MinLZCompression:
    1140            1 :                 dst.minlz++
    1141            1 :         case sstable.NoCompression:
    1142            1 :                 dst.none++
    1143            1 :         default:
    1144            1 :                 dst.unknown++
    1145              :         }
    1146            1 :         return dst, f.StatsValid()
    1147              : }
    1148              : 
    1149              : func (a compressionTypeAggregator) Merge(
    1150              :         src *compressionTypes, dst *compressionTypes,
    1151            1 : ) *compressionTypes {
    1152            1 :         dst.snappy += src.snappy
    1153            1 :         dst.zstd += src.zstd
    1154            1 :         dst.minlz += src.minlz
    1155            1 :         dst.none += src.none
    1156            1 :         dst.unknown += src.unknown
    1157            1 :         return dst
    1158            1 : }
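
To show how the three aggregator methods compose, here is a hypothetical driver (invented for illustration; the real manifest.Annotator machinery walks the B-Tree and caches per-node results) that accumulates one node's files and merges two subtree results:

```go
// aggregateFiles folds one node's files with Zero/Accumulate; hypothetical.
func aggregateFiles(files []*manifest.TableMetadata) (*compressionTypes, bool) {
	var agg compressionTypeAggregator
	dst := agg.Zero(nil)
	cacheOK := true
	for _, f := range files {
		var ok bool
		dst, ok = agg.Accumulate(f, dst)
		cacheOK = cacheOK && ok // cache only if every file's stats are loaded
	}
	return dst, cacheOK
}

// combineSubtrees merges the right subtree's aggregate into the left one.
func combineSubtrees(left, right *compressionTypes) *compressionTypes {
	var agg compressionTypeAggregator
	return agg.Merge(right, left)
}
```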
        

Generated by: LCOV version 2.0-1