LCOV - code coverage report

Current view: top level - pebble - metrics.go (source / functions)
Test:         2025-02-28 08:17Z 9af14eed - meta test only.lcov
Test Date:    2025-02-28 08:18:29

                 Coverage   Total   Hit
     Lines:        11.6 %     303    35
     Functions:         -       0     0

            Line data    Source code
       1              : // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
       2              : // of this source code is governed by a BSD-style license that can be found in
       3              : // the LICENSE file.
       4              : 
       5              : package pebble
       6              : 
       7              : import (
       8              :         "fmt"
       9              :         "math"
      10              :         "time"
      11              : 
      12              :         "github.com/cockroachdb/pebble/internal/base"
      13              :         "github.com/cockroachdb/pebble/internal/cache"
      14              :         "github.com/cockroachdb/pebble/internal/humanize"
      15              :         "github.com/cockroachdb/pebble/internal/manual"
      16              :         "github.com/cockroachdb/pebble/objstorage/objstorageprovider/sharedcache"
      17              :         "github.com/cockroachdb/pebble/record"
      18              :         "github.com/cockroachdb/pebble/sstable"
      19              :         "github.com/cockroachdb/pebble/sstable/block"
      20              :         "github.com/cockroachdb/pebble/wal"
      21              :         "github.com/cockroachdb/redact"
      22              :         "github.com/prometheus/client_golang/prometheus"
      23              : )
      24              : 
      25              : // CacheMetrics holds metrics for the block and file cache.
      26              : type CacheMetrics = cache.Metrics
      27              : 
       28              : // FilterMetrics holds metrics for the filter policy.
      29              : type FilterMetrics = sstable.FilterMetrics
      30              : 
      31              : // ThroughputMetric is a cumulative throughput metric. See the detailed
      32              : // comment in base.
      33              : type ThroughputMetric = base.ThroughputMetric
      34              : 
      35              : // SecondaryCacheMetrics holds metrics for the persistent secondary cache
      36              : // that caches commonly accessed blocks from blob storage on a local
      37              : // file system.
      38              : type SecondaryCacheMetrics = sharedcache.Metrics
      39              : 
      40              : // LevelMetrics holds per-level metrics such as the number of files and total
       41              : // size of the files, and compaction-related metrics.
      42              : type LevelMetrics struct {
      43              :         // The number of sublevels within the level. The sublevel count corresponds
      44              :         // to the read amplification for the level. An empty level will have a
      45              :         // sublevel count of 0, implying no read amplification. Only L0 will have
      46              :         // a sublevel count other than 0 or 1.
      47              :         Sublevels int32
      48              :         // The total number of files in the level.
      49              :         NumFiles int64
      50              :         // The total number of virtual sstables in the level.
      51              :         NumVirtualFiles uint64
      52              :         // The total size in bytes of the files in the level.
      53              :         Size int64
      54              :         // The total size of the virtual sstables in the level.
      55              :         VirtualSize uint64
      56              :         // The level's compaction score. This is the compensatedScoreRatio in the
      57              :         // candidateLevelInfo.
      58              :         Score float64
      59              :         // The number of incoming bytes from other levels read during
      60              :         // compactions. This excludes bytes moved and bytes ingested. For L0 this is
      61              :         // the bytes written to the WAL.
      62              :         BytesIn uint64
      63              :         // The number of bytes ingested. The sibling metric for tables is
      64              :         // TablesIngested.
      65              :         BytesIngested uint64
      66              :         // The number of bytes moved into the level by a "move" compaction. The
      67              :         // sibling metric for tables is TablesMoved.
      68              :         BytesMoved uint64
      69              :         // The number of bytes read for compactions at the level. This includes bytes
      70              :         // read from other levels (BytesIn), as well as bytes read for the level.
      71              :         BytesRead uint64
      72              :         // The number of bytes written during compactions. The sibling
      73              :         // metric for tables is TablesCompacted. This metric may be summed
      74              :         // with BytesFlushed to compute the total bytes written for the level.
      75              :         BytesCompacted uint64
      76              :         // The number of bytes written during flushes. The sibling
       77              :         // metric for tables is TablesFlushed. This metric is always
      78              :         // zero for all levels other than L0.
      79              :         BytesFlushed uint64
      80              :         // The number of sstables compacted to this level.
      81              :         TablesCompacted uint64
      82              :         // The number of sstables flushed to this level.
      83              :         TablesFlushed uint64
      84              :         // The number of sstables ingested into the level.
      85              :         TablesIngested uint64
      86              :         // The number of sstables moved to this level by a "move" compaction.
      87              :         TablesMoved uint64
      88              :         // The number of sstables deleted in a level by a delete-only compaction.
      89              :         TablesDeleted uint64
      90              :         // The number of sstables excised in a level by a delete-only compaction.
      91              :         TablesExcised uint64
      92              : 
      93              :         MultiLevel struct {
       94              :                 // BytesInTop is the total number of bytes in a multilevel compaction coming from the top level.
      95              :                 BytesInTop uint64
      96              : 
       97              :                 // BytesIn, exclusively for multilevel compactions.
      98              :                 BytesIn uint64
      99              : 
     100              :                 // BytesRead, exclusively for multilevel compactions.
     101              :                 BytesRead uint64
     102              :         }
     103              : 
      104              :         // Additional contains miscellaneous metrics that are not always printed.
     105              :         Additional struct {
     106              :                 // The sum of Properties.ValueBlocksSize for all the sstables in this
     107              :                 // level. Printed by LevelMetrics.format iff there is at least one level
     108              :                 // with a non-zero value.
     109              :                 ValueBlocksSize uint64
     110              :                 // Cumulative metrics about bytes written to data blocks and value blocks,
     111              :                 // via compactions (except move compactions) or flushes. Not printed by
     112              :                 // LevelMetrics.format, but are available to sophisticated clients.
     113              :                 BytesWrittenDataBlocks  uint64
     114              :                 BytesWrittenValueBlocks uint64
     115              :         }
     116              : }
     117              : 
     118              : // Add updates the counter metrics for the level.
     119            1 : func (m *LevelMetrics) Add(u *LevelMetrics) {
     120            1 :         m.NumFiles += u.NumFiles
     121            1 :         m.NumVirtualFiles += u.NumVirtualFiles
     122            1 :         m.VirtualSize += u.VirtualSize
     123            1 :         m.Size += u.Size
     124            1 :         m.BytesIn += u.BytesIn
     125            1 :         m.BytesIngested += u.BytesIngested
     126            1 :         m.BytesMoved += u.BytesMoved
     127            1 :         m.BytesRead += u.BytesRead
     128            1 :         m.BytesCompacted += u.BytesCompacted
     129            1 :         m.BytesFlushed += u.BytesFlushed
     130            1 :         m.TablesCompacted += u.TablesCompacted
     131            1 :         m.TablesFlushed += u.TablesFlushed
     132            1 :         m.TablesIngested += u.TablesIngested
     133            1 :         m.TablesMoved += u.TablesMoved
     134            1 :         m.MultiLevel.BytesInTop += u.MultiLevel.BytesInTop
     135            1 :         m.MultiLevel.BytesRead += u.MultiLevel.BytesRead
     136            1 :         m.MultiLevel.BytesIn += u.MultiLevel.BytesIn
     137            1 :         m.Additional.BytesWrittenDataBlocks += u.Additional.BytesWrittenDataBlocks
     138            1 :         m.Additional.BytesWrittenValueBlocks += u.Additional.BytesWrittenValueBlocks
     139            1 :         m.Additional.ValueBlocksSize += u.Additional.ValueBlocksSize
     140            1 : }
     141              : 
     142              : // WriteAmp computes the write amplification for compactions at this
     143              : // level. Computed as (BytesFlushed + BytesCompacted) / BytesIn.
     144            0 : func (m *LevelMetrics) WriteAmp() float64 {
     145            0 :         if m.BytesIn == 0 {
     146            0 :                 return 0
     147            0 :         }
     148            0 :         return float64(m.BytesFlushed+m.BytesCompacted) / float64(m.BytesIn)
     149              : }
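
For illustration, a minimal in-package sketch of this formula with hypothetical
byte counts (recall that BytesFlushed is zero for every level but L0):

    m := LevelMetrics{
            BytesIn:        100 << 20, // 100 MiB compacted in from the level above
            BytesCompacted: 250 << 20, // 250 MiB written by compactions at this level
    }
    fmt.Println(m.WriteAmp()) // (0 + 250 MiB) / 100 MiB = 2.5
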
     150              : 
     151              : var categoryCompaction = block.RegisterCategory("pebble-compaction", block.NonLatencySensitiveQoSLevel)
     152              : var categoryIngest = block.RegisterCategory("pebble-ingest", block.LatencySensitiveQoSLevel)
     153              : var categoryGet = block.RegisterCategory("pebble-get", block.LatencySensitiveQoSLevel)
     154              : 
     155              : // Metrics holds metrics for various subsystems of the DB such as the Cache,
     156              : // Compactions, WAL, and per-Level metrics.
     157              : //
     158              : // TODO(peter): The testing of these metrics is relatively weak. There should
     159              : // be testing that performs various operations on a DB and verifies that the
     160              : // metrics reflect those operations.
     161              : type Metrics struct {
     162              :         BlockCache CacheMetrics
     163              : 
     164              :         Compact struct {
     165              :                 // The total number of compactions, and per-compaction type counts.
     166              :                 Count                 int64
     167              :                 DefaultCount          int64
     168              :                 DeleteOnlyCount       int64
     169              :                 ElisionOnlyCount      int64
     170              :                 CopyCount             int64
     171              :                 MoveCount             int64
     172              :                 ReadCount             int64
     173              :                 TombstoneDensityCount int64
     174              :                 RewriteCount          int64
     175              :                 MultiLevelCount       int64
     176              :                 CounterLevelCount     int64
     177              :                 // An estimate of the number of bytes that need to be compacted for the LSM
     178              :                 // to reach a stable state.
     179              :                 EstimatedDebt uint64
     180              :                 // Number of bytes present in sstables being written by in-progress
     181              :                 // compactions. This value will be zero if there are no in-progress
     182              :                 // compactions.
     183              :                 InProgressBytes int64
     184              :                 // Number of compactions that are in-progress.
     185              :                 NumInProgress int64
     186              :                 // Number of compactions that were cancelled.
     187              :                 CancelledCount int64
      188              :                 // CancelledBytes is the number of bytes written by compactions that
      189              :                 // were cancelled.
     190              :                 CancelledBytes int64
     191              :                 // MarkedFiles is a count of files that are marked for
     192              :                 // compaction. Such files are compacted in a rewrite compaction
     193              :                 // when no other compactions are picked.
     194              :                 MarkedFiles int
     195              :                 // Duration records the cumulative duration of all compactions since the
     196              :                 // database was opened.
     197              :                 Duration time.Duration
     198              :         }
     199              : 
     200              :         Ingest struct {
      201              :                 // The total number of ingestions.
     202              :                 Count uint64
     203              :         }
     204              : 
     205              :         Flush struct {
     206              :                 // The total number of flushes.
     207              :                 Count           int64
     208              :                 WriteThroughput ThroughputMetric
     209              :                 // Number of flushes that are in-progress. In the current implementation
     210              :                 // this will always be zero or one.
     211              :                 NumInProgress int64
     212              :                 // AsIngestCount is a monotonically increasing counter of flush operations
     213              :                 // handling ingested tables.
     214              :                 AsIngestCount uint64
      215              :                 // AsIngestTableCount is a monotonically increasing counter of tables
      216              :                 // ingested as flushables.
     217              :                 AsIngestTableCount uint64
     218              :                 // AsIngestBytes is a monotonically increasing counter of the bytes flushed
     219              :                 // for flushables that originated as ingestion operations.
     220              :                 AsIngestBytes uint64
     221              :         }
     222              : 
     223              :         Filter FilterMetrics
     224              : 
     225              :         Levels [numLevels]LevelMetrics
     226              : 
     227              :         MemTable struct {
     228              :                 // The number of bytes allocated by memtables and large (flushable)
     229              :                 // batches.
     230              :                 Size uint64
     231              :                 // The count of memtables.
     232              :                 Count int64
     233              :                 // The number of bytes present in zombie memtables which are no longer
     234              :                 // referenced by the current DB state. An unbounded number of memtables
     235              :                 // may be zombie if they're still in use by an iterator. One additional
     236              :                 // memtable may be zombie if it's no longer in use and waiting to be
     237              :                 // recycled.
     238              :                 ZombieSize uint64
     239              :                 // The count of zombie memtables.
     240              :                 ZombieCount int64
     241              :         }
     242              : 
     243              :         Keys struct {
     244              :                 // The approximate count of internal range key set keys in the database.
     245              :                 RangeKeySetsCount uint64
     246              :                 // The approximate count of internal tombstones (DEL, SINGLEDEL and
     247              :                 // RANGEDEL key kinds) within the database.
     248              :                 TombstoneCount uint64
     249              :                 // A cumulative total number of missized DELSIZED keys encountered by
     250              :                 // compactions since the database was opened.
     251              :                 MissizedTombstonesCount uint64
     252              :         }
     253              : 
     254              :         Snapshots struct {
     255              :                 // The number of currently open snapshots.
     256              :                 Count int
     257              :                 // The sequence number of the earliest, currently open snapshot.
     258              :                 EarliestSeqNum base.SeqNum
     259              :                 // A running tally of keys written to sstables during flushes or
     260              :                 // compactions that would've been elided if it weren't for open
     261              :                 // snapshots.
     262              :                 PinnedKeys uint64
     263              :                 // A running cumulative sum of the size of keys and values written to
     264              :                 // sstables during flushes or compactions that would've been elided if
     265              :                 // it weren't for open snapshots.
     266              :                 PinnedSize uint64
     267              :         }
     268              : 
     269              :         Table struct {
     270              :                 // The number of bytes present in obsolete tables which are no longer
     271              :                 // referenced by the current DB state or any open iterators.
     272              :                 ObsoleteSize uint64
     273              :                 // The count of obsolete tables.
     274              :                 ObsoleteCount int64
     275              :                 // The number of bytes present in zombie tables which are no longer
     276              :                 // referenced by the current DB state but are still in use by an iterator.
     277              :                 ZombieSize uint64
     278              :                 // The count of zombie tables.
     279              :                 ZombieCount int64
     280              :                 // The count of sstables backing virtual tables.
     281              :                 BackingTableCount uint64
     282              :                 // The sum of the sizes of the BackingTableCount sstables that are backing virtual tables.
     283              :                 BackingTableSize uint64
     284              :                 // The number of sstables that are compressed with an unknown compression
     285              :                 // algorithm.
     286              :                 CompressedCountUnknown int64
     287              :                 // The number of sstables that are compressed with the default compression
     288              :                 // algorithm, snappy.
     289              :                 CompressedCountSnappy int64
     290              :                 // The number of sstables that are compressed with zstd.
     291              :                 CompressedCountZstd int64
     292              :                 // The number of sstables that are uncompressed.
     293              :                 CompressedCountNone int64
     294              : 
     295              :                 // Local file sizes.
     296              :                 Local struct {
     297              :                         // LiveSize is the number of bytes in live tables.
     298              :                         LiveSize uint64
     299              :                         // ObsoleteSize is the number of bytes in obsolete tables.
     300              :                         ObsoleteSize uint64
     301              :                         // ZombieSize is the number of bytes in zombie tables.
     302              :                         ZombieSize uint64
     303              :                 }
     304              :         }
     305              : 
     306              :         FileCache CacheMetrics
     307              : 
      308              :         // The number of open sstable iterators.
     309              :         TableIters int64
     310              :         // Uptime is the total time since this DB was opened.
     311              :         Uptime time.Duration
     312              : 
     313              :         WAL struct {
     314              :                 // Number of live WAL files.
     315              :                 Files int64
     316              :                 // Number of obsolete WAL files.
     317              :                 ObsoleteFiles int64
     318              :                 // Physical size of the obsolete WAL files.
     319              :                 ObsoletePhysicalSize uint64
     320              :                 // Size of the live data in the WAL files. Note that with WAL file
     321              :                 // recycling this is less than the actual on-disk size of the WAL files.
     322              :                 Size uint64
     323              :                 // Physical size of the WAL files on-disk. With WAL file recycling,
     324              :                 // this is greater than the live data in WAL files.
     325              :                 //
     326              :                 // TODO(sumeer): it seems this does not include ObsoletePhysicalSize.
     327              :                 // Should the comment be updated?
     328              :                 PhysicalSize uint64
     329              :                 // Number of logical bytes written to the WAL.
     330              :                 BytesIn uint64
     331              :                 // Number of bytes written to the WAL.
     332              :                 BytesWritten uint64
     333              :                 // Failover contains failover stats. Empty if failover is not enabled.
     334              :                 Failover wal.FailoverStats
     335              :         }
     336              : 
     337              :         LogWriter struct {
     338              :                 FsyncLatency prometheus.Histogram
     339              :                 record.LogWriterMetrics
     340              :         }
     341              : 
     342              :         CategoryStats []block.CategoryStatsAggregate
     343              : 
     344              :         SecondaryCacheMetrics SecondaryCacheMetrics
     345              : 
     346              :         private struct {
     347              :                 optionsFileSize  uint64
     348              :                 manifestFileSize uint64
     349              :         }
     350              : 
     351              :         manualMemory manual.Metrics
     352              : }
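
As a usage sketch, a snapshot of these metrics can be obtained from an open
database via DB.Metrics; the directory name and error handling below are
illustrative, not prescriptive:

    package main

    import (
            "fmt"
            "log"

            "github.com/cockroachdb/pebble"
    )

    func main() {
            db, err := pebble.Open("demo-db", &pebble.Options{})
            if err != nil {
                    log.Fatal(err)
            }
            defer db.Close()

            m := db.Metrics() // a consistent snapshot of the metrics above
            fmt.Printf("read amp: %d  local disk usage: %d bytes\n",
                    m.ReadAmp(), m.DiskSpaceUsage())
            fmt.Print(m) // the pretty-printed table; see Metrics.String below
    }
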
     353              : 
     354              : var (
     355              :         // FsyncLatencyBuckets are prometheus histogram buckets suitable for a histogram
     356              :         // that records latencies for fsyncs.
     357              :         FsyncLatencyBuckets = append(
     358              :                 prometheus.LinearBuckets(0.0, float64(time.Microsecond*100), 50),
     359              :                 prometheus.ExponentialBucketsRange(float64(time.Millisecond*5), float64(10*time.Second), 50)...,
     360              :         )
     361              : 
      362              :         // SecondaryCacheIOBuckets is exported so that users of package pebble
      363              :         // (e.g. CRDB) can export metrics that use these buckets.
     364              :         SecondaryCacheIOBuckets = sharedcache.IOBuckets
      365              :         // SecondaryCacheChannelWriteBuckets is exported so that users of package
      366              :         // pebble (e.g. CRDB) can export metrics that use these buckets.
     367              :         SecondaryCacheChannelWriteBuckets = sharedcache.ChannelWriteBuckets
     368              : )
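
A hedged sketch of how these buckets feed a Prometheus histogram; the metric
name and help text are illustrative, not part of pebble:

    import (
            "time"

            "github.com/cockroachdb/pebble"
            "github.com/prometheus/client_golang/prometheus"
    )

    var fsyncHist = prometheus.NewHistogram(prometheus.HistogramOpts{
            Name:    "demo_fsync_latency",
            Help:    "fsync latency in nanoseconds",
            Buckets: pebble.FsyncLatencyBuckets,
    })

    func recordFsync(d time.Duration) {
            // The buckets above are float64 nanoseconds, so observe in the same unit.
            fsyncHist.Observe(float64(d))
    }
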
     369              : 
     370              : // DiskSpaceUsage returns the total disk space used by the database in bytes,
     371              : // including live and obsolete files. This only includes local files, i.e.,
     372              : // remote files (as known to objstorage.Provider) are not included.
     373            0 : func (m *Metrics) DiskSpaceUsage() uint64 {
     374            0 :         var usageBytes uint64
     375            0 :         usageBytes += m.WAL.PhysicalSize
     376            0 :         usageBytes += m.WAL.ObsoletePhysicalSize
     377            0 :         usageBytes += m.Table.Local.LiveSize
     378            0 :         usageBytes += m.Table.Local.ObsoleteSize
     379            0 :         usageBytes += m.Table.Local.ZombieSize
     380            0 :         usageBytes += m.private.optionsFileSize
     381            0 :         usageBytes += m.private.manifestFileSize
     382            0 :         // TODO(sumeer): InProgressBytes does not distinguish between local and
     383            0 :         // remote files. This causes a small error. Fix.
     384            0 :         usageBytes += uint64(m.Compact.InProgressBytes)
     385            0 :         return usageBytes
     386            0 : }
     387              : 
     388              : // NumVirtual is the number of virtual sstables in the latest version
      389              : // summed over every level in the LSM.
     390            0 : func (m *Metrics) NumVirtual() uint64 {
     391            0 :         var n uint64
     392            0 :         for _, level := range m.Levels {
     393            0 :                 n += level.NumVirtualFiles
     394            0 :         }
     395            0 :         return n
     396              : }
     397              : 
     398              : // VirtualSize is the sum of the sizes of the virtual sstables in the
     399              : // latest version. BackingTableSize - VirtualSize gives an estimate for
     400              : // the space amplification caused by not compacting virtual sstables.
     401            0 : func (m *Metrics) VirtualSize() uint64 {
     402            0 :         var size uint64
     403            0 :         for _, level := range m.Levels {
     404            0 :                 size += level.VirtualSize
     405            0 :         }
     406            0 :         return size
     407              : }
     408              : 
     409              : // ReadAmp returns the current read amplification of the database.
      410              : // It's computed as the number of sublevels in L0 plus the number of non-empty
      411              : // levels below L0, each of which contributes a sublevel count of exactly 1.
     412            0 : func (m *Metrics) ReadAmp() int {
     413            0 :         var ramp int32
     414            0 :         for _, l := range m.Levels {
     415            0 :                 ramp += l.Sublevels
     416            0 :         }
     417            0 :         return int(ramp)
     418              : }
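
As a sketch, for a hypothetical LSM shape (an in-package fragment):

    var m Metrics
    m.Levels[0].Sublevels = 4 // L0 with four sublevels
    m.Levels[4].Sublevels = 1 // non-empty L4
    m.Levels[5].Sublevels = 1 // non-empty L5
    m.Levels[6].Sublevels = 1 // non-empty L6
    fmt.Println(m.ReadAmp())  // 4 + 1 + 1 + 1 = 7
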
     419              : 
     420              : // Total returns the sum of the per-level metrics and WAL metrics.
     421            1 : func (m *Metrics) Total() LevelMetrics {
     422            1 :         var total LevelMetrics
     423            1 :         for level := 0; level < numLevels; level++ {
     424            1 :                 l := &m.Levels[level]
     425            1 :                 total.Add(l)
     426            1 :                 total.Sublevels += l.Sublevels
     427            1 :         }
     428              :         // Compute total bytes-in as the bytes written to the WAL + bytes ingested.
     429            1 :         total.BytesIn = m.WAL.BytesWritten + total.BytesIngested
     430            1 :         // Add the total bytes-in to the total bytes-flushed. This is to account for
     431            1 :         // the bytes written to the log and bytes written externally and then
     432            1 :         // ingested.
     433            1 :         total.BytesFlushed += total.BytesIn
     434            1 :         return total
     435              : }
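
A small worked example of the adjustment above, with hypothetical numbers: if
the WAL saw 100 bytes written and 40 bytes were ingested across all levels,
Total() reports BytesIn = 100 + 40 = 140 and folds those bytes into
BytesFlushed, so that the aggregate write amplification (the bottom-right cell
of the table printed by String) charges both WAL writes and externally
written, then ingested, bytes:

    total := m.Total()
    fmt.Printf("total w-amp: %.1f\n", total.WriteAmp())
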
     436              : 
     437              : // String pretty-prints the metrics as below:
     438              : //
     439              : //            |                             |       |       |   ingested   |     moved    |    written   |       |    amp
     440              : //      level | tables  size val-bl vtables | score |   in  | tables  size | tables  size | tables  size |  read |   r   w
     441              : //      ------+-----------------------------+-------+-------+--------------+--------------+--------------+-------+---------
     442              : //          0 |   101   102B     0B       0 | 103.0 |  104B |   112   104B |   113   106B |   221   217B |  107B |   1  2.1
     443              : //          1 |   201   202B     0B       0 | 203.0 |  204B |   212   204B |   213   206B |   421   417B |  207B |   2  2.0
     444              : //          2 |   301   302B     0B       0 | 303.0 |  304B |   312   304B |   313   306B |   621   617B |  307B |   3  2.0
     445              : //          3 |   401   402B     0B       0 | 403.0 |  404B |   412   404B |   413   406B |   821   817B |  407B |   4  2.0
     446              : //          4 |   501   502B     0B       0 | 503.0 |  504B |   512   504B |   513   506B |  1.0K  1017B |  507B |   5  2.0
     447              : //          5 |   601   602B     0B       0 | 603.0 |  604B |   612   604B |   613   606B |  1.2K  1.2KB |  607B |   6  2.0
     448              : //          6 |   701   702B     0B       0 |     - |  704B |   712   704B |   713   706B |  1.4K  1.4KB |  707B |   7  2.0
     449              : //      total |  2.8K  2.7KB     0B       0 |     - | 2.8KB |  2.9K  2.8KB |  2.9K  2.8KB |  5.7K  8.4KB | 2.8KB |  28  3.0
     450              : //      -------------------------------------------------------------------------------------------------------------------
     451              : //      WAL: 22 files (24B)  in: 25B  written: 26B (4% overhead)
     452              : //      Flushes: 8
     453              : //      Compactions: 5  estimated debt: 6B  in progress: 2 (7B)
     454              : //      default: 27  delete: 28  elision: 29  move: 30  read: 31  rewrite: 32  multi-level: 33
     455              : //      MemTables: 12 (11B)  zombie: 14 (13B)
     456              : //      Zombie tables: 16 (15B)
     457              : //      Backing tables: 0 (0B)
     458              : //      Block cache: 2 entries (1B)  hit rate: 42.9%
     459              : //      Table cache: 18 entries (17B)  hit rate: 48.7%
     460              : //      Secondary cache: 40 entries (40B)  hit rate: 49.9%
     461              : //      Snapshots: 4  earliest seq num: 1024
     462              : //      Table iters: 21
     463              : //      Filter utility: 47.4%
     464              : //      Ingestions: 27  as flushable: 36 (34B in 35 tables)
     465            0 : func (m *Metrics) String() string {
     466            0 :         return redact.StringWithoutMarkers(m)
     467            0 : }
     468              : 
     469              : var _ redact.SafeFormatter = &Metrics{}
     470              : 
     471              : // SafeFormat implements redact.SafeFormatter.
     472            0 : func (m *Metrics) SafeFormat(w redact.SafePrinter, _ rune) {
     473            0 :         // NB: Pebble does not make any assumptions as to which Go primitive types
     474            0 :         // have been registered as safe with redact.RegisterSafeType and does not
     475            0 :         // register any types itself. Some of the calls to `redact.Safe`, etc are
     476            0 :         // superfluous in the context of CockroachDB, which registers all the Go
     477            0 :         // numeric types as safe.
     478            0 : 
     479            0 :         // TODO(jackson): There are a few places where we use redact.SafeValue
     480            0 :         // instead of redact.RedactableString. This is necessary because of a bug
     481            0 :         // whereby formatting a redact.RedactableString argument does not respect
     482            0 :         // width specifiers. When the issue is fixed, we can convert these to
     483            0 :         // RedactableStrings. https://github.com/cockroachdb/redact/issues/17
     484            0 : 
     485            0 :         multiExists := m.Compact.MultiLevelCount > 0
     486            0 :         appendIfMulti := func(line redact.SafeString) {
     487            0 :                 if multiExists {
     488            0 :                         w.SafeString(line)
     489            0 :                 }
     490              :         }
     491            0 :         newline := func() {
     492            0 :                 w.SafeString("\n")
     493            0 :         }
     494              : 
     495            0 :         w.SafeString("      |                             |       |       |   ingested   |     moved    |    written   |       |    amp")
     496            0 :         appendIfMulti("   |     multilevel")
     497            0 :         newline()
     498            0 :         w.SafeString("level | tables  size val-bl vtables | score |   in  | tables  size | tables  size | tables  size |  read |   r   w")
     499            0 :         appendIfMulti("  |    top   in  read")
     500            0 :         newline()
     501            0 :         w.SafeString("------+-----------------------------+-------+-------+--------------+--------------+--------------+-------+---------")
     502            0 :         appendIfMulti("-+------------------")
     503            0 :         newline()
     504            0 : 
     505            0 :         // formatRow prints out a row of the table.
     506            0 :         formatRow := func(m *LevelMetrics, score float64) {
     507            0 :                 scoreStr := "-"
     508            0 :                 if !math.IsNaN(score) {
     509            0 :                         // Try to keep the string no longer than 5 characters.
     510            0 :                         switch {
     511            0 :                         case score < 99.995:
     512            0 :                                 scoreStr = fmt.Sprintf("%.2f", score)
     513            0 :                         case score < 999.95:
     514            0 :                                 scoreStr = fmt.Sprintf("%.1f", score)
     515            0 :                         default:
     516            0 :                                 scoreStr = fmt.Sprintf("%.0f", score)
     517              :                         }
     518              :                 }
     519            0 :                 var wampStr string
     520            0 :                 if wamp := m.WriteAmp(); wamp > 99.5 {
     521            0 :                         wampStr = fmt.Sprintf("%.0f", wamp)
     522            0 :                 } else {
     523            0 :                         wampStr = fmt.Sprintf("%.1f", wamp)
     524            0 :                 }
     525              : 
     526            0 :                 w.Printf("| %5s %6s %6s %7s | %5s | %5s | %5s %6s | %5s %6s | %5s %6s | %5s | %3d %4s",
     527            0 :                         humanize.Count.Int64(m.NumFiles),
     528            0 :                         humanize.Bytes.Int64(m.Size),
     529            0 :                         humanize.Bytes.Uint64(m.Additional.ValueBlocksSize),
     530            0 :                         humanize.Count.Uint64(m.NumVirtualFiles),
     531            0 :                         redact.Safe(scoreStr),
     532            0 :                         humanize.Bytes.Uint64(m.BytesIn),
     533            0 :                         humanize.Count.Uint64(m.TablesIngested),
     534            0 :                         humanize.Bytes.Uint64(m.BytesIngested),
     535            0 :                         humanize.Count.Uint64(m.TablesMoved),
     536            0 :                         humanize.Bytes.Uint64(m.BytesMoved),
     537            0 :                         humanize.Count.Uint64(m.TablesFlushed+m.TablesCompacted),
     538            0 :                         humanize.Bytes.Uint64(m.BytesFlushed+m.BytesCompacted),
     539            0 :                         humanize.Bytes.Uint64(m.BytesRead),
     540            0 :                         redact.Safe(m.Sublevels),
     541            0 :                         redact.Safe(wampStr))
     542            0 : 
     543            0 :                 if multiExists {
     544            0 :                         w.Printf(" | %5s %5s %5s",
     545            0 :                                 humanize.Bytes.Uint64(m.MultiLevel.BytesInTop),
     546            0 :                                 humanize.Bytes.Uint64(m.MultiLevel.BytesIn),
     547            0 :                                 humanize.Bytes.Uint64(m.MultiLevel.BytesRead))
     548            0 :                 }
     549            0 :                 newline()
     550              :         }
     551              : 
     552            0 :         var total LevelMetrics
     553            0 :         for level := 0; level < numLevels; level++ {
     554            0 :                 l := &m.Levels[level]
     555            0 :                 w.Printf("%5d ", redact.Safe(level))
     556            0 : 
     557            0 :                 // Format the score.
     558            0 :                 score := math.NaN()
     559            0 :                 if level < numLevels-1 {
     560            0 :                         score = l.Score
     561            0 :                 }
     562            0 :                 formatRow(l, score)
     563            0 :                 total.Add(l)
     564            0 :                 total.Sublevels += l.Sublevels
     565              :         }
     566              :         // Compute total bytes-in as the bytes written to the WAL + bytes ingested.
     567            0 :         total.BytesIn = m.WAL.BytesWritten + total.BytesIngested
     568            0 :         // Add the total bytes-in to the total bytes-flushed. This is to account for
     569            0 :         // the bytes written to the log and bytes written externally and then
     570            0 :         // ingested.
     571            0 :         total.BytesFlushed += total.BytesIn
     572            0 :         w.SafeString("total ")
     573            0 :         formatRow(&total, math.NaN())
     574            0 : 
     575            0 :         w.SafeString("-------------------------------------------------------------------------------------------------------------------")
     576            0 :         appendIfMulti("--------------------")
     577            0 :         newline()
     578            0 :         w.Printf("WAL: %d files (%s)  in: %s  written: %s (%.0f%% overhead)",
     579            0 :                 redact.Safe(m.WAL.Files),
     580            0 :                 humanize.Bytes.Uint64(m.WAL.Size),
     581            0 :                 humanize.Bytes.Uint64(m.WAL.BytesIn),
     582            0 :                 humanize.Bytes.Uint64(m.WAL.BytesWritten),
     583            0 :                 redact.Safe(percent(int64(m.WAL.BytesWritten)-int64(m.WAL.BytesIn), int64(m.WAL.BytesIn))))
     584            0 :         failoverStats := m.WAL.Failover
     585            0 :         failoverStats.FailoverWriteAndSyncLatency = nil
     586            0 :         if failoverStats == (wal.FailoverStats{}) {
     587            0 :                 w.Printf("\n")
     588            0 :         } else {
     589            0 :                 w.Printf(" failover: (switches: %d, primary: %s, secondary: %s)\n", m.WAL.Failover.DirSwitchCount,
     590            0 :                         m.WAL.Failover.PrimaryWriteDuration.String(), m.WAL.Failover.SecondaryWriteDuration.String())
     591            0 :         }
     592              : 
     593            0 :         w.Printf("Flushes: %d\n", redact.Safe(m.Flush.Count))
     594            0 : 
     595            0 :         w.Printf("Compactions: %d  estimated debt: %s  in progress: %d (%s)\n",
     596            0 :                 redact.Safe(m.Compact.Count),
     597            0 :                 humanize.Bytes.Uint64(m.Compact.EstimatedDebt),
     598            0 :                 redact.Safe(m.Compact.NumInProgress),
     599            0 :                 humanize.Bytes.Int64(m.Compact.InProgressBytes))
     600            0 : 
     601            0 :         w.Printf("             default: %d  delete: %d  elision: %d  move: %d  read: %d  tombstone-density: %d  rewrite: %d  copy: %d  multi-level: %d\n",
     602            0 :                 redact.Safe(m.Compact.DefaultCount),
     603            0 :                 redact.Safe(m.Compact.DeleteOnlyCount),
     604            0 :                 redact.Safe(m.Compact.ElisionOnlyCount),
     605            0 :                 redact.Safe(m.Compact.MoveCount),
     606            0 :                 redact.Safe(m.Compact.ReadCount),
     607            0 :                 redact.Safe(m.Compact.TombstoneDensityCount),
     608            0 :                 redact.Safe(m.Compact.RewriteCount),
     609            0 :                 redact.Safe(m.Compact.CopyCount),
     610            0 :                 redact.Safe(m.Compact.MultiLevelCount))
     611            0 : 
     612            0 :         w.Printf("MemTables: %d (%s)  zombie: %d (%s)\n",
     613            0 :                 redact.Safe(m.MemTable.Count),
     614            0 :                 humanize.Bytes.Uint64(m.MemTable.Size),
     615            0 :                 redact.Safe(m.MemTable.ZombieCount),
     616            0 :                 humanize.Bytes.Uint64(m.MemTable.ZombieSize))
     617            0 : 
     618            0 :         w.Printf("Zombie tables: %d (%s, local: %s)\n",
     619            0 :                 redact.Safe(m.Table.ZombieCount),
     620            0 :                 humanize.Bytes.Uint64(m.Table.ZombieSize),
     621            0 :                 humanize.Bytes.Uint64(m.Table.Local.ZombieSize))
     622            0 : 
     623            0 :         w.Printf("Backing tables: %d (%s)\n",
     624            0 :                 redact.Safe(m.Table.BackingTableCount),
     625            0 :                 humanize.Bytes.Uint64(m.Table.BackingTableSize))
     626            0 :         w.Printf("Virtual tables: %d (%s)\n",
     627            0 :                 redact.Safe(m.NumVirtual()),
     628            0 :                 humanize.Bytes.Uint64(m.VirtualSize()))
     629            0 :         w.Printf("Local tables size: %s\n", humanize.Bytes.Uint64(m.Table.Local.LiveSize))
     630            0 :         w.SafeString("Compression types:")
     631            0 :         if count := m.Table.CompressedCountSnappy; count > 0 {
     632            0 :                 w.Printf(" snappy: %d", redact.Safe(count))
     633            0 :         }
     634            0 :         if count := m.Table.CompressedCountZstd; count > 0 {
     635            0 :                 w.Printf(" zstd: %d", redact.Safe(count))
     636            0 :         }
     637            0 :         if count := m.Table.CompressedCountNone; count > 0 {
     638            0 :                 w.Printf(" none: %d", redact.Safe(count))
     639            0 :         }
     640            0 :         if count := m.Table.CompressedCountUnknown; count > 0 {
     641            0 :                 w.Printf(" unknown: %d", redact.Safe(count))
     642            0 :         }
     643            0 :         w.Print("\n")
     644            0 : 
     645            0 :         formatCacheMetrics := func(m *CacheMetrics, name redact.SafeString) {
     646            0 :                 w.Printf("%s: %s entries (%s)  hit rate: %.1f%%\n",
     647            0 :                         name,
     648            0 :                         humanize.Count.Int64(m.Count),
     649            0 :                         humanize.Bytes.Int64(m.Size),
     650            0 :                         redact.Safe(hitRate(m.Hits, m.Misses)))
     651            0 :         }
     652            0 :         formatCacheMetrics(&m.BlockCache, "Block cache")
     653            0 :         formatCacheMetrics(&m.FileCache, "Table cache")
     654            0 : 
     655            0 :         formatSharedCacheMetrics := func(w redact.SafePrinter, m *SecondaryCacheMetrics, name redact.SafeString) {
     656            0 :                 w.Printf("%s: %s entries (%s)  hit rate: %.1f%%\n",
     657            0 :                         name,
     658            0 :                         humanize.Count.Int64(m.Count),
     659            0 :                         humanize.Bytes.Int64(m.Size),
     660            0 :                         redact.Safe(hitRate(m.ReadsWithFullHit, m.ReadsWithPartialHit+m.ReadsWithNoHit)))
     661            0 :         }
     662            0 :         if m.SecondaryCacheMetrics.Size > 0 || m.SecondaryCacheMetrics.ReadsWithFullHit > 0 {
     663            0 :                 formatSharedCacheMetrics(w, &m.SecondaryCacheMetrics, "Secondary cache")
     664            0 :         }
     665              : 
     666            0 :         w.Printf("Snapshots: %d  earliest seq num: %d\n",
     667            0 :                 redact.Safe(m.Snapshots.Count),
     668            0 :                 redact.Safe(m.Snapshots.EarliestSeqNum))
     669            0 : 
     670            0 :         w.Printf("Table iters: %d\n", redact.Safe(m.TableIters))
     671            0 :         w.Printf("Filter utility: %.1f%%\n", redact.Safe(hitRate(m.Filter.Hits, m.Filter.Misses)))
     672            0 :         w.Printf("Ingestions: %d  as flushable: %d (%s in %d tables)\n",
     673            0 :                 redact.Safe(m.Ingest.Count),
     674            0 :                 redact.Safe(m.Flush.AsIngestCount),
     675            0 :                 humanize.Bytes.Uint64(m.Flush.AsIngestBytes),
     676            0 :                 redact.Safe(m.Flush.AsIngestTableCount))
     677            0 : 
     678            0 :         var inUseTotal uint64
     679            0 :         for i := range m.manualMemory {
     680            0 :                 inUseTotal += m.manualMemory[i].InUseBytes
     681            0 :         }
     682            0 :         inUse := func(purpose manual.Purpose) uint64 {
     683            0 :                 return m.manualMemory[purpose].InUseBytes
     684            0 :         }
     685            0 :         w.Printf("Cgo memory usage: %s  block cache: %s (data: %s, maps: %s, entries: %s)  memtables: %s\n",
     686            0 :                 humanize.Bytes.Uint64(inUseTotal),
     687            0 :                 humanize.Bytes.Uint64(inUse(manual.BlockCacheData)+inUse(manual.BlockCacheMap)+inUse(manual.BlockCacheEntry)),
     688            0 :                 humanize.Bytes.Uint64(inUse(manual.BlockCacheData)),
     689            0 :                 humanize.Bytes.Uint64(inUse(manual.BlockCacheMap)),
     690            0 :                 humanize.Bytes.Uint64(inUse(manual.BlockCacheEntry)),
     691            0 :                 humanize.Bytes.Uint64(inUse(manual.MemTable)),
     692            0 :         )
     693              : }
     694              : 
     695            0 : func hitRate(hits, misses int64) float64 {
     696            0 :         return percent(hits, hits+misses)
     697            0 : }
     698              : 
     699            0 : func percent(numerator, denominator int64) float64 {
     700            0 :         if denominator == 0 {
     701            0 :                 return 0
     702            0 :         }
     703            0 :         return 100 * float64(numerator) / float64(denominator)
     704              : }
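
For instance, the cache hit rates in the sample output of Metrics.String above
are computed this way (an in-package sketch):

    fmt.Printf("hit rate: %.1f%%\n", hitRate(42, 56)) // 100 * 42 / (42 + 56) = 42.9%
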
     705              : 
     706              : // StringForTests is identical to m.String() on 64-bit platforms. It is used to
     707              : // provide a platform-independent result for tests.
     708            0 : func (m *Metrics) StringForTests() string {
     709            0 :         mCopy := *m
     710            0 :         if math.MaxInt == math.MaxInt32 {
      711            0 :                 // This is the difference in Sizeof(sstable.Reader{}) between 64- and 32-bit
     712            0 :                 // platforms.
     713            0 :                 const tableCacheSizeAdjustment = 212
     714            0 :                 mCopy.FileCache.Size += mCopy.FileCache.Count * tableCacheSizeAdjustment
     715            0 :         }
     716              :         // Don't show cgo memory statistics as they can vary based on architecture,
     717              :         // invariants tag, etc.
     718            0 :         mCopy.manualMemory = manual.Metrics{}
     719            0 :         return redact.StringWithoutMarkers(&mCopy)
     720              : }
        

Generated by: LCOV version 2.0-1