// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
        "fmt"
        "iter"
        "math"
        "slices"
        "time"
        "unsafe"

        "github.com/cockroachdb/crlib/crhumanize"
        "github.com/cockroachdb/pebble/internal/ascii"
        "github.com/cockroachdb/pebble/internal/ascii/table"
        "github.com/cockroachdb/pebble/internal/base"
        "github.com/cockroachdb/pebble/internal/cache"
        "github.com/cockroachdb/pebble/internal/compression"
        "github.com/cockroachdb/pebble/internal/manifest"
        "github.com/cockroachdb/pebble/internal/manual"
        "github.com/cockroachdb/pebble/objstorage/objstorageprovider/sharedcache"
        "github.com/cockroachdb/pebble/record"
        "github.com/cockroachdb/pebble/sstable"
        "github.com/cockroachdb/pebble/sstable/blob"
        "github.com/cockroachdb/pebble/sstable/block"
        "github.com/cockroachdb/pebble/wal"
        "github.com/cockroachdb/redact"
        "github.com/prometheus/client_golang/prometheus"
        "golang.org/x/exp/constraints"
)

// CacheMetrics holds metrics for the block and file cache.
type CacheMetrics = cache.Metrics

// FilterMetrics holds metrics for the filter policy.
type FilterMetrics = sstable.FilterMetrics

// ThroughputMetric is a cumulative throughput metric. See the detailed
// comment in base.
type ThroughputMetric = base.ThroughputMetric

// SecondaryCacheMetrics holds metrics for the persistent secondary cache
// that caches commonly accessed blocks from blob storage on a local
// file system.
type SecondaryCacheMetrics = sharedcache.Metrics

// LevelMetrics holds per-level metrics such as the number of files and total
// size of the files, and compaction-related metrics.
type LevelMetrics struct {
        // The number of sublevels within the level. The sublevel count corresponds
        // to the read amplification for the level. An empty level will have a
        // sublevel count of 0, implying no read amplification. Only L0 will have
        // a sublevel count other than 0 or 1.
        Sublevels int32
        // The total count of sstables in the level.
        TablesCount int64
        // The total size in bytes of the sstables in the level. Note that if tables
        // contain references to blob files, this quantity does not include the
        // size of the blob files or the referenced values.
        TablesSize int64
        // The total number of virtual sstables in the level.
        VirtualTablesCount uint64
        // The total size of the virtual sstables in the level.
        VirtualTablesSize uint64
        // The estimated total physical size of all blob references across all
        // sstables in the level. The physical size is estimated based on the size
        // of referenced values and the values' blob files' compression ratios.
        EstimatedReferencesSize uint64
        // The level's compaction score, used to rank levels (0 if the level doesn't
        // need compaction). See candidateLevelInfo.
        Score float64
        // The level's fill factor (the ratio between the size of the level and the
        // ideal size). See candidateLevelInfo.
        FillFactor float64
        // The level's compensated fill factor. See candidateLevelInfo.
        CompensatedFillFactor float64
        // The number of incoming bytes from other levels' sstables read during
        // compactions. This excludes bytes moved and bytes ingested. For L0 this is
        // the bytes written to the WAL.
        TableBytesIn uint64
        // The number of sstable bytes ingested. The sibling metric for tables is
        // TablesIngested.
        TableBytesIngested uint64
        // The number of sstable bytes moved into the level by a "move" compaction.
        // The sibling metric for tables is TablesMoved.
        TableBytesMoved uint64
        // The number of bytes read for compactions at the level. This includes bytes
        // read from other levels (TableBytesIn), as well as bytes read for the level.
        TableBytesRead uint64
        // The number of bytes written to sstables during compactions. The sibling
        // metric for tables is TablesCompacted. This metric may be summed with
        // TableBytesFlushed to compute the total bytes written for the level.
        TableBytesCompacted uint64
        // The number of bytes written to sstables during flushes. The sibling
        // metric for tables is TablesFlushed. This metric is always zero for all
        // levels other than L0.
        TableBytesFlushed uint64
        // The number of sstables compacted to this level.
        TablesCompacted uint64
        // The number of sstables flushed to this level.
        TablesFlushed uint64
        // The number of sstables ingested into the level.
        TablesIngested uint64
        // The number of sstables moved to this level by a "move" compaction.
        TablesMoved uint64
        // The number of sstables deleted in a level by a delete-only compaction.
        TablesDeleted uint64
        // The number of sstables excised in a level by a delete-only compaction.
        TablesExcised uint64
        // BlobBytesRead is the volume of physical bytes read from blob files during
        // compactions outputting into this level.
        BlobBytesRead uint64
        // BlobBytesCompacted is the number of bytes written to blob files while
        // compacting sstables in this level.
        BlobBytesCompacted uint64
        // BlobBytesFlushed is the number of bytes written to blob files while
        // flushing sstables. This metric is always zero for all levels other than
        // L0.
        BlobBytesFlushed uint64

        MultiLevel struct {
                // TableBytesInTop are the total bytes in a multilevel compaction coming
                // from the top level.
                TableBytesInTop uint64

                // TableBytesIn, exclusively for multilevel compactions.
                TableBytesIn uint64

                // TableBytesRead, exclusively for multilevel compactions.
                TableBytesRead uint64
        }

        // Additional contains misc additional metrics that are not always printed.
        Additional struct {
                // The sum of FileProperties.ValueBlocksSize for all the sstables in this
                // level. Printed by LevelMetrics.format iff there is at least one level
                // with a non-zero value.
                ValueBlocksSize uint64
                // Cumulative metrics about bytes written to data blocks and value blocks,
                // via compactions (except move compactions) or flushes. Not printed by
                // LevelMetrics.format, but are available to sophisticated clients.
                BytesWrittenDataBlocks  uint64
                BytesWrittenValueBlocks uint64
        }
}

// AggregateSize returns an estimated physical size of the level's sstables and
// their referenced values stored in blob files. The size of physical sstables
// is exactly known. Virtual sstables' sizes are estimated, and the size of
// values stored in blob files is estimated based on the volume of referenced
// data and the blob file's compression ratio.
func (m *LevelMetrics) AggregateSize() int64 {
        return m.TablesSize + int64(m.EstimatedReferencesSize)
}

// Add updates the counter metrics for the level.
func (m *LevelMetrics) Add(u *LevelMetrics) {
        m.Sublevels += u.Sublevels
        m.TablesCount += u.TablesCount
        m.TablesSize += u.TablesSize
        m.VirtualTablesCount += u.VirtualTablesCount
        m.VirtualTablesSize += u.VirtualTablesSize
        m.EstimatedReferencesSize += u.EstimatedReferencesSize
        m.TableBytesIn += u.TableBytesIn
        m.TableBytesIngested += u.TableBytesIngested
        m.TableBytesMoved += u.TableBytesMoved
        m.TableBytesRead += u.TableBytesRead
        m.TableBytesCompacted += u.TableBytesCompacted
        m.TableBytesFlushed += u.TableBytesFlushed
        m.TablesCompacted += u.TablesCompacted
        m.TablesFlushed += u.TablesFlushed
        m.TablesIngested += u.TablesIngested
        m.TablesMoved += u.TablesMoved
        m.BlobBytesCompacted += u.BlobBytesCompacted
        m.BlobBytesFlushed += u.BlobBytesFlushed
        m.BlobBytesRead += u.BlobBytesRead
        m.MultiLevel.TableBytesInTop += u.MultiLevel.TableBytesInTop
        m.MultiLevel.TableBytesRead += u.MultiLevel.TableBytesRead
        m.MultiLevel.TableBytesIn += u.MultiLevel.TableBytesIn
        m.Additional.BytesWrittenDataBlocks += u.Additional.BytesWrittenDataBlocks
        m.Additional.BytesWrittenValueBlocks += u.Additional.BytesWrittenValueBlocks
        m.Additional.ValueBlocksSize += u.Additional.ValueBlocksSize
}

// WriteAmp computes the write amplification for compactions at this
// level.
//
// The write amplification is computed as the quantity of physical bytes written
// divided by the quantity of logical bytes written.
//
// Concretely, it's computed as:
//
//      TableBytesFlushed + TableBytesCompacted + BlobBytesFlushed + BlobBytesCompacted
//      -------------------------------------------------------------------------------
//                                    TableBytesIn
func (m *LevelMetrics) WriteAmp() float64 {
        if m.TableBytesIn == 0 {
                return 0
        }
        return float64(m.TableBytesFlushed+m.TableBytesCompacted+m.BlobBytesFlushed+m.BlobBytesCompacted) /
                float64(m.TableBytesIn)
}
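
// exampleWriteAmp is an illustrative sketch (hypothetical; not part of the
// original file) showing how WriteAmp might be surfaced for an open DB: once
// per level, and in aggregate via Total (defined later in this file). It
// assumes an open *DB.
func exampleWriteAmp(db *DB) {
        m := db.Metrics()
        for i := range m.Levels {
                fmt.Printf("L%d w-amp: %.2f\n", i, m.Levels[i].WriteAmp())
        }
        // Total returns a value, so bind it to a variable before calling the
        // pointer-receiver method WriteAmp.
        total := m.Total()
        fmt.Printf("total w-amp: %.2f\n", total.WriteAmp())
}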

var categoryCompaction = block.RegisterCategory("pebble-compaction", block.NonLatencySensitiveQoSLevel)
var categoryIngest = block.RegisterCategory("pebble-ingest", block.LatencySensitiveQoSLevel)
var categoryGet = block.RegisterCategory("pebble-get", block.LatencySensitiveQoSLevel)

// Metrics holds metrics for various subsystems of the DB such as the Cache,
// Compactions, WAL, and per-Level metrics.
//
// TODO(peter): The testing of these metrics is relatively weak. There should
// be testing that performs various operations on a DB and verifies that the
// metrics reflect those operations.
type Metrics struct {
        BlockCache CacheMetrics

        Compact struct {
                // The total number of compactions, and per-compaction type counts.
                Count                 int64
                DefaultCount          int64
                DeleteOnlyCount       int64
                ElisionOnlyCount      int64
                CopyCount             int64
                MoveCount             int64
                ReadCount             int64
                TombstoneDensityCount int64
                RewriteCount          int64
                MultiLevelCount       int64
                BlobFileRewriteCount  int64
                VirtualRewriteCount   int64
                // An estimate of the number of bytes that need to be compacted for the LSM
                // to reach a stable state.
                EstimatedDebt uint64
                // Number of bytes present in sstables being written by in-progress
                // compactions. This value will be zero if there are no in-progress
                // compactions.
                InProgressBytes int64
                // Number of compactions that are in-progress.
                NumInProgress int64
                // Number of compactions that were cancelled.
                CancelledCount int64
                // CancelledBytes is the number of bytes written by compactions that
                // were cancelled.
                CancelledBytes int64
                // Total number of compactions that hit an error.
                FailedCount int64
                // NumProblemSpans is the current (instantaneous) count of "problem spans"
                // which temporarily block compactions.
                NumProblemSpans int
                // MarkedFiles is a count of files that are marked for
                // compaction. Such files are compacted in a rewrite compaction
                // when no other compactions are picked.
                MarkedFiles int
                // Duration records the cumulative duration of all compactions since the
                // database was opened.
                Duration time.Duration
        }

        Ingest struct {
                // The total number of ingestions.
                Count uint64
                // The number of excise operations during ingestion.
                ExciseIngestCount int64
        }

        Flush struct {
                // The total number of flushes.
                Count int64
                // TODO(sumeer): the IdleDuration in this metric is flawed. It only
                // measures idle duration when a flush finishes, representing the idleness
                // before the start of a flush. So computing deltas over this metric over
                // some time interval D may observe the sum of IdleDuration+WorkDuration
                // to be either much smaller or much larger than D.
                WriteThroughput ThroughputMetric
                // Number of flushes that are in-progress. In the current implementation
                // this will always be zero or one.
                NumInProgress int64
                // AsIngestCount is a monotonically increasing counter of flush operations
                // handling ingested tables.
                AsIngestCount uint64
                // AsIngestTableCount is a monotonically increasing counter of tables
                // ingested as flushables.
                AsIngestTableCount uint64
                // AsIngestBytes is a monotonically increasing counter of the bytes flushed
                // for flushables that originated as ingestion operations.
                AsIngestBytes uint64
        }

        Filter FilterMetrics

        Levels [numLevels]LevelMetrics

        MemTable struct {
                // The number of bytes allocated by memtables and large (flushable)
                // batches.
                Size uint64
                // The count of memtables.
                Count int64
                // The number of bytes present in zombie memtables which are no longer
                // referenced by the current DB state. An unbounded number of memtables
                // may be zombie if they're still in use by an iterator. One additional
                // memtable may be zombie if it's no longer in use and waiting to be
                // recycled.
                ZombieSize uint64
                // The count of zombie memtables.
                ZombieCount int64
        }

        Keys struct {
                // The approximate count of internal range key set keys in the database.
                RangeKeySetsCount uint64
                // The approximate count of internal tombstones (DEL, SINGLEDEL and
                // RANGEDEL key kinds) within the database.
                TombstoneCount uint64
                // A cumulative total number of missized DELSIZED keys encountered by
                // compactions since the database was opened.
                MissizedTombstonesCount uint64
        }

        Snapshots struct {
                // The number of currently open snapshots.
                Count int
                // The sequence number of the earliest, currently open snapshot.
                EarliestSeqNum base.SeqNum
                // A running tally of keys written to sstables during flushes or
                // compactions that would've been elided if it weren't for open
                // snapshots.
                PinnedKeys uint64
                // A running cumulative sum of the size of keys and values written to
                // sstables during flushes or compactions that would've been elided if
                // it weren't for open snapshots.
                PinnedSize uint64
        }

        Table struct {
                // The number of bytes present in obsolete tables which are no longer
                // referenced by the current DB state or any open iterators.
                ObsoleteSize uint64
                // The count of obsolete tables.
                ObsoleteCount int64
                // The number of bytes present in zombie tables which are no longer
                // referenced by the current DB state but are still in use by an iterator.
                ZombieSize uint64
                // The count of zombie tables.
                ZombieCount int64
                // The count of sstables backing virtual tables.
                BackingTableCount uint64
                // The sum of the sizes of the BackingTableCount sstables that are backing
                // virtual tables.
                BackingTableSize uint64
                // Compression statistics for sstable data (does not include blob files).
                Compression CompressionMetrics

                // Local file sizes.
                Local struct {
                        // LiveSize is the number of bytes in live tables.
                        LiveSize uint64
                        // LiveCount is the number of live tables.
                        LiveCount uint64
                        // ObsoleteSize is the number of bytes in obsolete tables.
                        ObsoleteSize uint64
                        // ObsoleteCount is the number of obsolete tables.
                        ObsoleteCount uint64
                        // ZombieSize is the number of bytes in zombie tables.
                        ZombieSize uint64
                        // ZombieCount is the number of zombie tables.
                        ZombieCount uint64
                }

                // Garbage bytes.
                Garbage struct {
                        // PointDeletionsBytesEstimate is the estimated file bytes that will be
                        // saved by compacting all point deletions. This is dependent on table
                        // stats collection, so can be very incomplete until
                        // InitialStatsCollectionComplete becomes true.
                        PointDeletionsBytesEstimate uint64
                        // RangeDeletionsBytesEstimate is the estimated file bytes that will be
                        // saved by compacting all range deletions. This is dependent on table
                        // stats collection, so can be very incomplete until
                        // InitialStatsCollectionComplete becomes true.
                        RangeDeletionsBytesEstimate uint64
                }

                // Whether the initial stats collection (for existing tables on Open) is
                // complete.
                InitialStatsCollectionComplete bool
                // The count of recently created sstables that need stats collection. This
                // does not include sstables that existed when the DB was opened, so the
                // value is only useful when InitialStatsCollectionComplete is true.
                PendingStatsCollectionCount int64
        }

        BlobFiles struct {
                // The count of all live blob files.
                LiveCount uint64
                // The physical file size of all live blob files.
                LiveSize uint64
                // ValueSize is the sum of the length of the uncompressed values in all
                // live (referenced by some sstable(s) within the current version) blob
                // files. ValueSize may be greater than LiveSize when compression is
                // effective. ValueSize includes bytes in live blob files that are not
                // actually reachable by any sstable key. If any value within the blob
                // file is reachable by a key in a live sstable, then the entirety of
                // the blob file's values is included within ValueSize.
                ValueSize uint64
                // ReferencedValueSize is the sum of the length of the uncompressed
                // values (in all live blob files) that are still referenced by keys
                // within live tables. Over the lifetime of a blob file, a blob file's
                // references are removed as some compactions choose to write new blob
                // files containing the same values or keys referencing the file's
                // values are deleted. ReferencedValueSize accounts for the volume of
                // bytes that are actually reachable by some key in a live table.
                //
                // The difference between ValueSize and ReferencedValueSize is
                // (uncompressed) space amplification that could be reclaimed if all
                // blob files were rewritten, discarding values that are no longer
                // referenced by any keys in any sstables within the current version.
                ReferencedValueSize uint64
                // ReferencedBackingValueSize is the sum of the length of the uncompressed
                // values (in all live blob files) that are still referenced by keys
                // within backing tables. Note that this value is an overestimate because
                // each virtual table contributes its backing table's referenced value
                // size.
                ReferencedBackingValueSize uint64
                // The count of all obsolete blob files.
                ObsoleteCount uint64
                // The physical size of all obsolete blob files.
                ObsoleteSize uint64
                // The count of all zombie blob files.
                ZombieCount uint64
                // The physical size of all zombie blob files.
                ZombieSize uint64
                // Local file sizes.
                Local struct {
                        // LiveSize is the physical size of local live blob files.
                        LiveSize uint64
                        // LiveCount is the number of local live blob files.
                        LiveCount uint64
                        // ObsoleteSize is the physical size of local obsolete blob files.
                        ObsoleteSize uint64
                        // ObsoleteCount is the number of local obsolete blob files.
                        ObsoleteCount uint64
                        // ZombieSize is the physical size of local zombie blob files.
                        ZombieSize uint64
                        // ZombieCount is the number of local zombie blob files.
                        ZombieCount uint64
                }

                Compression CompressionMetrics
        }

        FileCache FileCacheMetrics

        // Count of the number of open sstable iterators.
        TableIters int64
        // Uptime is the total time since this DB was opened.
        Uptime time.Duration

        WAL struct {
                // Number of live WAL files.
                Files int64
                // Number of obsolete WAL files.
                ObsoleteFiles int64
                // Physical size of the obsolete WAL files.
                ObsoletePhysicalSize uint64
                // Size of the live data in the WAL files. Note that with WAL file
                // recycling this is less than the actual on-disk size of the WAL files.
                Size uint64
                // Physical size of the WAL files on-disk. With WAL file recycling,
                // this is greater than the live data in WAL files.
                //
                // TODO(sumeer): it seems this does not include ObsoletePhysicalSize.
                // Should the comment be updated?
                PhysicalSize uint64
                // Number of logical bytes written to the WAL.
                BytesIn uint64
                // Number of bytes written to the WAL.
                BytesWritten uint64
                // Failover contains failover stats. Empty if failover is not enabled.
                Failover wal.FailoverStats
        }

        LogWriter struct {
                FsyncLatency prometheus.Histogram
                record.LogWriterMetrics
        }

        CategoryStats []block.CategoryStatsAggregate

        SecondaryCacheMetrics SecondaryCacheMetrics

        private struct {
                optionsFileSize  uint64
                manifestFileSize uint64
        }

        manualMemory manual.Metrics
}
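
// exampleReadMetrics is an illustrative sketch (hypothetical; not part of the
// original file) of how a caller might obtain a Metrics snapshot and read a
// few of the fields above. It assumes an open *DB.
func exampleReadMetrics(db *DB) {
        m := db.Metrics()
        fmt.Printf("compactions: %d (in progress: %d)\n",
                m.Compact.Count, m.Compact.NumInProgress)
        fmt.Printf("memtables: %d (%d bytes)\n", m.MemTable.Count, m.MemTable.Size)
        fmt.Printf("read amplification: %d\n", m.ReadAmp())
}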

// CompressionMetrics contains compression metrics for sstables or blob files.
type CompressionMetrics struct {
        // NoCompressionBytes is the total number of bytes in files that are not
        // compressed. Data can be uncompressed when: 1) compression is disabled;
        // 2) the block is one of certain special kinds of blocks; or 3) the block
        // is not compressible.
        NoCompressionBytes uint64
        // CompressedBytesWithoutStats is the total number of bytes in files that do
        // not encode compression statistics (or for which there are no statistics
        // yet).
        CompressedBytesWithoutStats uint64
        Snappy                      CompressionStatsForSetting
        MinLZ                       CompressionStatsForSetting
        Zstd                        CompressionStatsForSetting
}

// CompressionStatsForSetting holds compression statistics for a single
// compression setting.
type CompressionStatsForSetting = block.CompressionStatsForSetting

// Add accumulates the given per-setting compression stats into cm.
func (cm *CompressionMetrics) Add(stats *block.CompressionStats) {
        for s, cs := range stats.All() {
                switch s.Algorithm {
                case compression.NoAlgorithm:
                        cm.NoCompressionBytes += cs.UncompressedBytes
                case compression.Snappy:
                        cm.Snappy.Add(cs)
                case compression.MinLZ:
                        cm.MinLZ.Add(cs)
                case compression.Zstd:
                        cm.Zstd.Add(cs)
                }
        }
}

// MergeWith merges the stats in o into cm.
func (cm *CompressionMetrics) MergeWith(o *CompressionMetrics) {
        cm.NoCompressionBytes += o.NoCompressionBytes
        cm.CompressedBytesWithoutStats += o.CompressedBytesWithoutStats
        cm.Snappy.Add(o.Snappy)
        cm.MinLZ.Add(o.MinLZ)
        cm.Zstd.Add(o.Zstd)
}
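
// exampleCombinedCompression is an illustrative sketch (hypothetical; not part
// of the original file): MergeWith can aggregate the sstable and blob file
// compression metrics above into a single combined view.
func exampleCombinedCompression(m *Metrics) CompressionMetrics {
        var combined CompressionMetrics
        combined.MergeWith(&m.Table.Compression)
        combined.MergeWith(&m.BlobFiles.Compression)
        return combined
}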

var (
        // FsyncLatencyBuckets are prometheus histogram buckets suitable for a histogram
        // that records latencies for fsyncs.
        FsyncLatencyBuckets = append(
                prometheus.LinearBuckets(0.0, float64(time.Microsecond*100), 50),
                prometheus.ExponentialBucketsRange(float64(time.Millisecond*5), float64(10*time.Second), 50)...,
        )

        // SecondaryCacheIOBuckets is exported from package pebble so that CRDB can
        // export metrics recorded with these buckets.
        SecondaryCacheIOBuckets = sharedcache.IOBuckets
        // SecondaryCacheChannelWriteBuckets is exported from package pebble so that
        // CRDB can export metrics recorded with these buckets.
        SecondaryCacheChannelWriteBuckets = sharedcache.ChannelWriteBuckets
)
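
// exampleFsyncLatencyHistogram is an illustrative sketch (hypothetical; not
// part of the original file) of how FsyncLatencyBuckets can be used to build
// a histogram of the kind that feeds Metrics.LogWriter.FsyncLatency. The
// metric name is a placeholder; note the buckets above are expressed in
// nanoseconds.
func exampleFsyncLatencyHistogram() prometheus.Histogram {
        return prometheus.NewHistogram(prometheus.HistogramOpts{
                Name:    "pebble_wal_fsync_latency", // hypothetical metric name
                Buckets: FsyncLatencyBuckets,
        })
}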

// DiskSpaceUsage returns the total disk space used by the database in bytes,
// including live and obsolete files. This only includes local files, i.e.,
// remote files (as known to objstorage.Provider) are not included.
func (m *Metrics) DiskSpaceUsage() uint64 {
        var usageBytes uint64
        usageBytes += m.WAL.PhysicalSize
        usageBytes += m.WAL.ObsoletePhysicalSize
        usageBytes += m.Table.Local.LiveSize
        usageBytes += m.Table.Local.ObsoleteSize
        usageBytes += m.Table.Local.ZombieSize
        usageBytes += m.BlobFiles.Local.LiveSize
        usageBytes += m.BlobFiles.Local.ObsoleteSize
        usageBytes += m.BlobFiles.Local.ZombieSize
        usageBytes += m.private.optionsFileSize
        usageBytes += m.private.manifestFileSize
        // TODO(sumeer): InProgressBytes does not distinguish between local and
        // remote files. This causes a small error. Fix.
        usageBytes += uint64(m.Compact.InProgressBytes)
        return usageBytes
}
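
// exampleDiskUtilization is an illustrative sketch (hypothetical; not part of
// the original file): DiskSpaceUsage can be compared against a caller-supplied
// local disk capacity (capacityBytes is an assumed input) to compute a
// utilization ratio.
func exampleDiskUtilization(m *Metrics, capacityBytes uint64) float64 {
        if capacityBytes == 0 {
                return 0
        }
        return float64(m.DiskSpaceUsage()) / float64(capacityBytes)
}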

// NumVirtual is the number of virtual sstables in the latest version
// summed over every level in the LSM.
func (m *Metrics) NumVirtual() uint64 {
        var n uint64
        for _, level := range m.Levels {
                n += level.VirtualTablesCount
        }
        return n
}

// VirtualSize is the sum of the sizes of the virtual sstables in the
// latest version. BackingTableSize - VirtualSize gives an estimate for
// the space amplification caused by not compacting virtual sstables.
func (m *Metrics) VirtualSize() uint64 {
        var size uint64
        for _, level := range m.Levels {
                size += level.VirtualTablesSize
        }
        return size
}
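
// exampleVirtualSpaceAmp is an illustrative sketch (hypothetical; not part of
// the original file) of the estimate described in the VirtualSize comment
// above: the bytes that could be reclaimed by compacting away all virtual
// sstables. The guard avoids underflow since VirtualSize is itself an
// estimate.
func exampleVirtualSpaceAmp(m *Metrics) uint64 {
        vsize := m.VirtualSize()
        if vsize > m.Table.BackingTableSize {
                return 0
        }
        return m.Table.BackingTableSize - vsize
}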

// ReadAmp returns the current read amplification of the database.
// It's computed as the number of sublevels in L0 + the number of non-empty
// levels below L0.
func (m *Metrics) ReadAmp() int {
        var ramp int32
        for _, l := range m.Levels {
                ramp += l.Sublevels
        }
        return int(ramp)
}

// Total returns the sum of the per-level metrics and WAL metrics.
func (m *Metrics) Total() LevelMetrics {
        var total LevelMetrics
        for level := 0; level < numLevels; level++ {
                l := &m.Levels[level]
                total.Add(l)
        }
        // Compute total bytes-in as the bytes written to the WAL + bytes ingested.
        total.TableBytesIn = m.WAL.BytesWritten + total.TableBytesIngested
        // Add the total bytes-in to the total bytes-flushed. This is to account for
        // the bytes written to the log and bytes written externally and then
        // ingested.
        total.TableBytesFlushed += total.TableBytesIn
        return total
}

// RemoteTablesTotal returns the total number of remote tables and their total
// size. Remote tables are computed as the difference between total tables
// (live + obsolete + zombie) and local tables.
func (m *Metrics) RemoteTablesTotal() (count uint64, size uint64) {
        var liveTables, liveTableBytes int64
        for level := 0; level < numLevels; level++ {
                liveTables += m.Levels[level].TablesCount
                liveTableBytes += m.Levels[level].TablesSize
        }
        totalCount := liveTables + m.Table.ObsoleteCount + m.Table.ZombieCount
        localCount := m.Table.Local.LiveCount + m.Table.Local.ObsoleteCount + m.Table.Local.ZombieCount
        remoteCount := uint64(totalCount) - localCount

        totalSize := uint64(liveTableBytes) + m.Table.ObsoleteSize + m.Table.ZombieSize
        localSize := m.Table.Local.LiveSize + m.Table.Local.ObsoleteSize + m.Table.Local.ZombieSize
        remoteSize := totalSize - localSize

        return remoteCount, remoteSize
}
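
// exampleRemoteFraction is an illustrative sketch (hypothetical; not part of
// the original file): combining RemoteTablesTotal with the same totals it is
// derived from yields the fraction of table bytes on remote storage.
func exampleRemoteFraction(m *Metrics) float64 {
        _, remoteSize := m.RemoteTablesTotal()
        total := m.Total()
        totalSize := uint64(total.TablesSize) + m.Table.ObsoleteSize + m.Table.ZombieSize
        if totalSize == 0 {
                return 0
        }
        return float64(remoteSize) / float64(totalSize)
}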

// Assert that Metrics implements redact.SafeFormatter.
var _ redact.SafeFormatter = (*Metrics)(nil)

// SafeFormat implements redact.SafeFormatter.
func (m *Metrics) SafeFormat(w redact.SafePrinter, _ rune) {
        w.SafeString(redact.SafeString(m.String()))
}

var (
        levelMetricsTableTopHeader = `LSM                             |    vtables   |   value sep   |        |   ingested   |    amp`
        levelMetricsTable          = func() table.Layout[*LevelMetrics] {
                def := table.Define[*LevelMetrics](
                        table.StringWithTupleIndex("level", 5, table.AlignRight, func(tupleIndex int, m *LevelMetrics) string {
                                if tupleIndex == manifest.NumLevels {
                                        return "total"
                                }
                                return fmt.Sprintf("L%d", tupleIndex)
                        }),
                        table.Bytes("size", 10, table.AlignRight, func(m *LevelMetrics) uint64 { return uint64(m.TablesSize) + m.EstimatedReferencesSize }),
                        table.Div(),
                        table.Count("tables", 6, table.AlignRight, func(m *LevelMetrics) int64 { return m.TablesCount }),
                        table.Bytes("size", 5, table.AlignRight, func(m *LevelMetrics) int64 { return m.TablesSize }),
                        table.Div(),
                        table.Count("count", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.VirtualTablesCount }),
                        table.Count("size", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.VirtualTablesSize }),
                        table.Div(),
                        table.Bytes("refsz", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.EstimatedReferencesSize }),
                        table.Bytes("valblk", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.Additional.ValueBlocksSize }),
                        table.Div(),
                        table.Bytes("in", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TableBytesIn }),
                        table.Div(),
                        table.Count("tables", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TablesIngested }),
                        table.Bytes("size", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TableBytesIngested }),
                        table.Div(),
                        table.Int("r", 3, table.AlignRight, func(m *LevelMetrics) int { return int(m.Sublevels) }),
                        table.Float("w", 5, table.AlignRight, func(m *LevelMetrics) float64 { return m.WriteAmp() }),
                )
                def.FilterFn = func(tupleIndex int, m *LevelMetrics) (passed bool) {
                        return m.TablesCount != 0 || m.VirtualTablesCount != 0 || m.TableBytesIn != 0 || m.TablesIngested != 0
                }
                return def
        }()
        levelCompactionMetricsTableTopHeader = `COMPACTIONS               |     moved    |     multilevel    |     read     |       written`
        compactionLevelMetricsTable          = func() table.Layout[*LevelMetrics] {
                def := table.Define[*LevelMetrics](
                        table.StringWithTupleIndex("level", 5, table.AlignRight, func(tupleIndex int, m *LevelMetrics) string {
                                if tupleIndex == manifest.NumLevels {
                                        return "total"
                                }
                                return fmt.Sprintf("L%d", tupleIndex)
                        }),
                        table.Div(),
                        table.Float("score", 5, table.AlignRight, func(m *LevelMetrics) float64 { return m.Score }),
                        table.Float("ff", 5, table.AlignRight, func(m *LevelMetrics) float64 { return m.FillFactor }),
                        table.Float("cff", 5, table.AlignRight, func(m *LevelMetrics) float64 { return m.CompensatedFillFactor }),
                        table.Div(),
                        table.Count("tables", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TablesMoved }),
                        table.Bytes("size", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TableBytesMoved }),
                        table.Div(),
                        table.Bytes("top", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.MultiLevel.TableBytesInTop }),
                        table.Bytes("in", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.MultiLevel.TableBytesIn }),
                        table.Bytes("read", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.MultiLevel.TableBytesRead }),
                        table.Div(),
                        table.Bytes("tables", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TableBytesRead }),
                        table.Bytes("blob", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.BlobBytesRead }),
                        table.Div(),
                        table.Count("tables", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TablesFlushed + m.TablesCompacted }),
                        table.Bytes("sstsz", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TableBytesFlushed + m.TableBytesCompacted }),
                        table.Bytes("blobsz", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.BlobBytesFlushed + m.BlobBytesCompacted }),
                )
                def.FilterFn = func(tupleIndex int, m *LevelMetrics) (passed bool) {
                        return !math.IsNaN(m.Score) || m.FillFactor != 0 || m.TablesMoved != 0 || m.MultiLevel.TableBytesInTop != 0 ||
                                m.MultiLevel.TableBytesIn != 0 || m.MultiLevel.TableBytesRead != 0 || m.BlobBytesRead != 0 ||
                                m.TablesFlushed != 0 || m.TablesCompacted != 0 || m.BlobBytesFlushed != 0 || m.BlobBytesCompacted != 0
                }
                return def
        }()
        compactionKindTable = table.Define[*Metrics](
                table.String("kind", 5, table.AlignRight, func(m *Metrics) string { return "count" }),
                table.Div(),
                table.Int64("default", 7, table.AlignRight, func(m *Metrics) int64 { return m.Compact.DefaultCount }),
                table.Int64("delete", 7, table.AlignRight, func(m *Metrics) int64 { return m.Compact.DeleteOnlyCount }),
                table.Int64("elision", 8, table.AlignRight, func(m *Metrics) int64 { return m.Compact.ElisionOnlyCount }),
                table.Int64("move", 5, table.AlignRight, func(m *Metrics) int64 { return m.Compact.MoveCount }),
                table.Int64("read", 5, table.AlignRight, func(m *Metrics) int64 { return m.Compact.ReadCount }),
                table.Int64("tomb", 5, table.AlignRight, func(m *Metrics) int64 { return m.Compact.TombstoneDensityCount }),
                table.Int64("rewrite", 8, table.AlignRight, func(m *Metrics) int64 { return m.Compact.RewriteCount }),
                table.Int64("copy", 5, table.AlignRight, func(m *Metrics) int64 { return m.Compact.CopyCount }),
                table.Int64("multi", 6, table.AlignRight, func(m *Metrics) int64 { return m.Compact.MultiLevelCount }),
                table.Int64("blob", 5, table.AlignRight, func(m *Metrics) int64 { return m.Compact.BlobFileRewriteCount }),
                table.Int64("virtual", 7, table.AlignRight, func(m *Metrics) int64 { return m.Compact.VirtualRewriteCount }),
        )
        commitPipelineInfoTableTopHeader = `COMMIT PIPELINE`
        commitPipelineInfoTableSubHeader = `               wals                |              memtables              |       ingestions`
        commitPipelineInfoTable          = table.Define[commitPipelineInfo](
                table.String("files", 9, table.AlignRight, func(i commitPipelineInfo) string { return i.files }),
                table.Div(),
                table.String("written", 10, table.AlignRight, func(i commitPipelineInfo) string { return i.written }),
                table.Div(),
                table.String("overhead", 9, table.AlignRight, func(i commitPipelineInfo) string { return i.overhead }),
                table.Div(),
                table.String("flushes", 9, table.AlignRight, func(i commitPipelineInfo) string { return i.flushes }),
                table.Div(),
                table.String("live", 10, table.AlignRight, func(i commitPipelineInfo) string { return i.live }),
                table.Div(),
                table.String("zombie", 10, table.AlignRight, func(i commitPipelineInfo) string { return i.zombie }),
                table.Div(),
                table.String("total", 9, table.AlignRight, func(i commitPipelineInfo) string { return i.total }),
                table.Div(),
                table.String("flushable", 11, table.AlignRight, func(i commitPipelineInfo) string { return i.flushable }),
        )
        blockCacheInfoTableTopHeader = `BLOCK CACHE`
        blockCacheInfoTable          = table.Define[blockCacheInfo](
                func() []table.Element {
                        e := make([]table.Element, 0, 3+cache.NumCategories)
                        e = append(e, table.String("level", 6, table.AlignCenter, func(i blockCacheInfo) string { return i.level }))
                        e = append(e, table.String("all", 11, table.AlignCenter, func(i blockCacheInfo) string { return i.missRate }))
                        e = append(e, table.Div())
                        for c := range cache.Categories {
                                e = append(e, table.String(c.String(), 12, table.AlignCenter, func(i blockCacheInfo) string {
                                        return i.perCategory[c]
                                }))
                        }
                        return e
                }()...,
        )
        iteratorInfoTableTopHeader = `ITERATORS`
        iteratorInfoTableSubHeader = `        file cache        |    filter   |    open     |    open`
        iteratorInfoTable          = table.Define[iteratorInfo](
                table.String("entries", 12, table.AlignRight, func(i iteratorInfo) string { return i.fcEntries }),
                table.Div(),
                table.String("hit rate", 10, table.AlignRight, func(i iteratorInfo) string { return i.fcHitRate }),
                table.Div(),
                table.String("utilization", 11, table.AlignRight, func(i iteratorInfo) string { return i.bloomFilterUtil }),
                table.Div(),
                table.String("sst iters ", 11, table.AlignRight, func(i iteratorInfo) string { return i.sstableItersOpen }),
                table.Div(),
     784            1 :                 table.String("snapshots ", 11, table.AlignRight, func(i iteratorInfo) string { return i.snapshotsOpen }),
     785              :         )
     786              :         fileInfoTableHeader = `FILES                 tables                       |       blob files        |     blob values`
     787              :         fileInfoTable       = table.Define[tableAndBlobInfo](
     788            1 :                 table.String("stats prog", 13, table.AlignRight, func(i tableAndBlobInfo) string { return i.tableInfo.stats }),
     789              :                 table.Div(),
     790            1 :                 table.String("backing", 10, table.AlignRight, func(i tableAndBlobInfo) string { return i.tableInfo.backing }),
     791              :                 table.Div(),
     792            1 :                 table.String("zombie", 21, table.AlignRight, func(i tableAndBlobInfo) string { return i.tableInfo.zombie }),
     793              :                 table.Div(),
     794            1 :                 table.String("live", 10, table.AlignRight, func(i tableAndBlobInfo) string { return i.blobInfo.live }),
     795              :                 table.Div(),
     796            1 :                 table.String("zombie", 10, table.AlignRight, func(i tableAndBlobInfo) string { return i.blobInfo.zombie }),
     797              :                 table.Div(),
     798            1 :                 table.String("total", 6, table.AlignRight, func(i tableAndBlobInfo) string { return i.blobInfo.total }),
     799              :                 table.Div(),
     800            1 :                 table.String("refed", 10, table.AlignRight, func(i tableAndBlobInfo) string { return i.blobInfo.referenced }),
     801              :         )
     802              :         cgoMemInfoTableHeader = `CGO MEMORY    |          block cache           |                     memtables`
     803              :         cgoMemInfoTable       = table.Define[cgoMemInfo](
     804            1 :                 table.String("tot", 13, table.AlignRight, func(i cgoMemInfo) string { return i.tot }),
     805              :                 table.Div(),
     806            1 :                 table.String("tot", 13, table.AlignRight, func(i cgoMemInfo) string { return i.bcTot }),
     807              :                 table.Div(),
     808            1 :                 table.String("data", 14, table.AlignRight, func(i cgoMemInfo) string { return i.bcData }),
     809              :                 table.Div(),
     810            1 :                 table.String("maps", 15, table.AlignRight, func(i cgoMemInfo) string { return i.bcMaps }),
     811              :                 table.Div(),
     812            1 :                 table.String("ents", 15, table.AlignRight, func(i cgoMemInfo) string { return i.bcEnts }),
     813              :                 table.Div(),
     814            1 :                 table.String("tot", 13, table.AlignRight, func(i cgoMemInfo) string { return i.memtablesTot }),
     815              :         )
     816              :         compactionInfoTableTopHeader = `COMPACTIONS`
     817              :         compactionInfoTable          = table.Define[compactionMetricsInfo](
     818            1 :                 table.String("estimated debt", 17, table.AlignRight, func(i compactionMetricsInfo) string { return i.estimatedDebt }),
     819              :                 table.Div(),
     820            1 :                 table.String("in progress", 17, table.AlignRight, func(i compactionMetricsInfo) string { return i.inProgress }),
     821              :                 table.Div(),
     822            1 :                 table.String("cancelled", 17, table.AlignRight, func(i compactionMetricsInfo) string { return i.cancelled }),
     823              :                 table.Div(),
     824            1 :                 table.String("failed", 17, table.AlignRight, func(i compactionMetricsInfo) string { return fmt.Sprint(i.failed) }),
     825              :                 table.Div(),
     826            1 :                 table.String("problem spans", 18, table.AlignRight, func(i compactionMetricsInfo) string { return i.problemSpans }),
     827              :         )
     828              :         keysInfoTableTopHeader = `KEYS`
     829              :         keysInfoTable          = table.Define[keysInfo](
     830            1 :                 table.String("range keys", 16, table.AlignRight, func(i keysInfo) string { return i.rangeKeys }),
     831              :                 table.Div(),
     832            1 :                 table.String("tombstones", 16, table.AlignRight, func(i keysInfo) string { return i.tombstones }),
     833              :                 table.Div(),
     834            1 :                 table.String("missized tombstones", 24, table.AlignRight, func(i keysInfo) string { return i.missizedTombstones }),
     835              :                 table.Div(),
     836            1 :                 table.String("point dels", 15, table.AlignRight, func(i keysInfo) string { return i.pointDels }),
     837              :                 table.Div(),
     838            1 :                 table.String("range dels", 15, table.AlignRight, func(i keysInfo) string { return i.rangeDels }),
     839              :         )
     840              :         compressionTableHeader = `COMPRESSION`
     841              :         compressionTable       = table.Define[compressionInfo](
     842            1 :                 table.String("algorithm", 13, table.AlignRight, func(i compressionInfo) string { return i.algorithm }),
     843              :                 table.Div(),
     844            1 :                 table.String("tables", 13, table.AlignRight, func(i compressionInfo) string { return i.tables }),
     845              :                 table.Div(),
     846            1 :                 table.String("blob files", 13, table.AlignRight, func(i compressionInfo) string { return i.blobFiles }),
     847              :         )
     848              : )
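                      : // Editor's sketch (not part of the original file): each table.Define above
                      : // fixes a column layout for one row type, and (*Metrics).String below pairs
                      : // it with a Render call over rows of that type. In miniature, as done later
                      : // in this file:
                      : //
                      : //      cur = cur.WriteString(compactionInfoTableTopHeader).NewlineReturn()
                      : //      cur = compactionInfoTable.Render(cur, table.RenderOptions{}, compactionMetricsInfoContents)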
     849              : 
     850              : type commitPipelineInfo struct {
     851              :         files     string
     852              :         written   string
     853              :         overhead  string
     854              :         flushes   string
     855              :         live      string
     856              :         zombie    string
     857              :         total     string
     858              :         flushable string
     859              : }
     860              : 
     861              : type blockCacheInfo struct {
     862              :         level       string
     863              :         missRate    string
     864              :         perCategory [cache.NumCategories]string
     865              : }
     866              : 
     867            1 : func makeBlockCacheInfo(hm *cache.HitsAndMisses) []blockCacheInfo {
     868            1 :         // missRateAndPercentage returns a string that shows misses/(hits+misses) and
     869            1 :         // misses/totalMisses as percentages; for example: "12% [4.5%]".
     870            1 :         missRateAndPercentage := func(hits, misses, totalMisses int64) string {
     871            1 :                 if hits == 0 && misses == 0 {
     872            1 :                         return ""
     873            1 :                 }
     874            1 :                 return fmt.Sprintf("%s [%s]",
     875            1 :                         crhumanize.Percent(misses, hits+misses),
     876            1 :                         crhumanize.Percent(misses, totalMisses),
     877            1 :                 )
     878              :         }
     879              : 
     880            1 :         totalHits, totalMisses := hm.Aggregate()
     881            1 :         res := make([]blockCacheInfo, 0, cache.NumLevels+1)
     882            1 :         for level := range cache.Levels {
     883            1 :                 levelHits, levelMisses := hm.AggregateLevel(level)
     884            1 :                 if levelHits == 0 && levelMisses == 0 {
     885            1 :                         // Skip levels with no activity.
     886            1 :                         continue
     887              :                 }
     888            1 :                 bci := blockCacheInfo{
     889            1 :                         level:    level.String(),
     890            1 :                         missRate: missRateAndPercentage(levelHits, levelMisses, totalMisses),
     891            1 :                 }
     892            1 :                 for category := range cache.Categories {
     893            1 :                         hits, misses := hm.Get(level, category)
     894            1 :                         bci.perCategory[category] = missRateAndPercentage(hits, misses, totalMisses)
     895            1 :                 }
     896            1 :                 res = append(res, bci)
     897              :         }
     898            1 :         last := blockCacheInfo{
     899            1 :                 level:    "total",
     900            1 :                 missRate: fmt.Sprintf("%s%%", crhumanize.Float(percent(totalMisses, totalHits+totalMisses), 1)),
     901            1 :         }
     902            1 :         for category := range cache.Categories {
     903            1 :                 catHits, catMisses := hm.AggregateCategory(category)
     904            1 :                 last.perCategory[category] = missRateAndPercentage(catHits, catMisses, totalMisses)
     905            1 :         }
     906            1 :         return append(res, last)
     907              : }
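                      : // Editor's worked sketch (assumed numbers, not from the original file): with
                      : // levelHits=900, levelMisses=100 at one level and totalMisses=400 overall,
                      : // missRateAndPercentage formats 100/(900+100) and 100/400, i.e. roughly
                      : // "10% [25%]"; the exact rendering is up to crhumanize.Percent.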
     908              : 
     909              : type iteratorInfo struct {
     910              :         fcEntries        string
     911              :         fcHitRate        string
     912              :         bloomFilterUtil  string
     913              :         sstableItersOpen string
     914              :         snapshotsOpen    string
     915              : }
     916              : type tableInfo struct {
     917              :         stats   string
     918              :         backing string
     919              :         zombie  string
     920              : }
     921              : 
     922              : type blobInfo struct {
     923              :         live       string
     924              :         zombie     string
     925              :         total      string
     926              :         referenced string
     927              : }
     928              : 
     929              : type tableAndBlobInfo struct {
     930              :         tableInfo tableInfo
     931              :         blobInfo  blobInfo
     932              : }
     933              : 
     934              : type cgoMemInfo struct {
     935              :         tot          string
     936              :         bcTot        string
     937              :         bcData       string
     938              :         bcMaps       string
     939              :         bcEnts       string
     940              :         memtablesTot string
     941              : }
     942              : 
     943              : type compactionMetricsInfo struct {
     944              :         estimatedDebt string
     945              :         inProgress    string
     946              :         cancelled     string
     947              :         failed        int64
     948              :         problemSpans  string
     949              : }
     950              : 
     951              : type keysInfo struct {
     952              :         rangeKeys          string
     953              :         tombstones         string
     954              :         missizedTombstones string
     955              :         pointDels          string
     956              :         rangeDels          string
     957              : }
     958              : 
     959              : type compressionInfo struct {
     960              :         algorithm string
     961              :         tables    string
     962              :         blobFiles string
     963              : }
     964              : 
     965            1 : func makeCompressionInfo(algorithm string, table, blob CompressionStatsForSetting) compressionInfo {
     966            1 :         i := compressionInfo{algorithm: algorithm}
     967            1 :         if table.CompressedBytes > 0 {
     968            1 :                 i.tables = fmt.Sprintf("%s (CR=%s)", humanizeBytes(table.CompressedBytes), crhumanize.Float(table.CompressionRatio(), 2 /* precision */))
     969            1 :         }
     970            1 :         if blob.CompressedBytes > 0 {
     971            1 :                 i.blobFiles = fmt.Sprintf("%s (CR=%s)", humanizeBytes(blob.CompressedBytes), crhumanize.Float(blob.CompressionRatio(), 2 /* precision */))
     972            1 :         }
     973            1 :         return i
     974              : }
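                      : // Editor's worked sketch (assumed numbers): if tables hold 512MB compressed
                      : // at a compression ratio of 2.5 and no blob files use this algorithm, the
                      : // row renders tables as roughly "512MB (CR=2.5)" and leaves blobFiles empty;
                      : // rows where both cells are empty are dropped by the slices.DeleteFunc
                      : // filter in String below.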
     975              : 
     976              : // String pretty-prints the metrics.
     977              : //
     978              : // See testdata/metrics for an example.
     979            1 : func (m *Metrics) String() string {
     980            1 :         wb := ascii.Make(128 /* width */, 80 /* height */)
     981            1 :         var total LevelMetrics
     982            1 :         for l := range numLevels {
     983            1 :                 total.Add(&m.Levels[l])
     984            1 :         }
     985              :         // Compute total bytes-in as the bytes written to the WAL + bytes ingested.
     986            1 :         total.TableBytesIn = m.WAL.BytesWritten + total.TableBytesIngested
     987            1 :         // Add the total bytes-in to the total bytes-flushed. This is to account for
     988            1 :         // the bytes written to the log and bytes written externally and then
     989            1 :         // ingested.
     990            1 :         total.TableBytesFlushed += total.TableBytesIn
     991            1 :         total.Score = math.NaN()
     992            1 :         total.FillFactor = math.NaN()
     993            1 :         total.CompensatedFillFactor = math.NaN()
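                      :         // Editor's note (assumed numbers): with m.WAL.BytesWritten=100 and
                      :         // total.TableBytesIngested=50, TableBytesIn becomes 150 and is folded
                      :         // into TableBytesFlushed, so the totals row accounts for both WAL
                      :         // traffic and ingested-then-flushed bytes.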
     994            1 : 
     995            1 :         // LSM level metrics.
      996            1 :         cur := wb.At(0, 0)
     997            1 :         cur = cur.WriteString(levelMetricsTableTopHeader).NewlineReturn()
     998            1 :         cur = levelMetricsTable.Render(cur, table.RenderOptions{
     999            1 :                 HorizontalDividers: table.MakeHorizontalDividers(0, -1),
    1000            1 :         }, slices.Collect(m.LevelMetricsIter())...)
    1001            1 :         cur = cur.NewlineReturn()
    1002            1 : 
    1003            1 :         // Compaction level metrics.
    1004            1 :         cur = cur.WriteString(levelCompactionMetricsTableTopHeader).NewlineReturn()
    1005            1 :         cur = compactionLevelMetricsTable.Render(cur, table.RenderOptions{
    1006            1 :                 HorizontalDividers: table.MakeHorizontalDividers(0, -1),
    1007            1 :         }, slices.Collect(m.LevelMetricsIter())...)
    1008            1 : 
    1009            1 :         cur = cur.NewlineReturn()
    1010            1 :         cur = compactionKindTable.Render(cur, table.RenderOptions{
    1011            1 :                 HorizontalDividers: table.HorizontalDividers{},
    1012            1 :         }, m)
    1013            1 :         cur = cur.NewlineReturn()
    1014            1 : 
    1015            1 :         commitPipelineInfoContents := commitPipelineInfo{
    1016            1 :                 // wals.
    1017            1 :                 files:    fmt.Sprintf("%s (%s)", humanizeCount(m.WAL.Files), humanizeBytes(m.WAL.Size)),
    1018            1 :                 written:  fmt.Sprintf("%s: %s", humanizeBytes(m.WAL.BytesIn), humanizeBytes(m.WAL.BytesWritten)),
    1019            1 :                 overhead: fmt.Sprintf("%.1f%%", percent(int64(m.WAL.BytesWritten)-int64(m.WAL.BytesIn), int64(m.WAL.BytesIn))),
    1020            1 :                 // memtables.
    1021            1 :                 flushes: crhumanize.Count(m.Flush.Count).String(),
    1022            1 :                 live:    fmt.Sprintf("%s (%s)", humanizeCount(m.MemTable.Count), humanizeBytes(m.MemTable.Size)),
    1023            1 :                 zombie:  fmt.Sprintf("%s (%s)", humanizeCount(m.MemTable.ZombieCount), humanizeBytes(m.MemTable.ZombieSize)),
    1024            1 :                 // ingestions.
     1025            1 :                 total:     crhumanize.Count(m.Ingest.Count).String(),
    1026            1 :                 flushable: fmt.Sprintf("%s (%s)", humanizeCount(m.Flush.AsIngestCount), humanizeBytes(m.Flush.AsIngestBytes)),
    1027            1 :         }
    1028            1 :         cur = cur.WriteString(commitPipelineInfoTableTopHeader).NewlineReturn()
    1029            1 :         cur = cur.WriteString(commitPipelineInfoTableSubHeader).NewlineReturn()
    1030            1 :         cur = commitPipelineInfoTable.Render(cur, table.RenderOptions{}, commitPipelineInfoContents)
    1031            1 :         cur = cur.NewlineReturn()
    1032            1 : 
    1033            1 :         cur = cur.WriteString(blockCacheInfoTableTopHeader)
    1034            1 :         cur = cur.Printf(": %s entries (%s)", humanizeCount(m.BlockCache.Count), humanizeBytes(m.BlockCache.Size))
    1035            1 :         cur = cur.NewlineReturn()
    1036            1 : 
    1037            1 :         cur = cur.WriteString("                 miss rate [percentage of total misses] since start\n")
    1038            1 :         bci := makeBlockCacheInfo(&m.BlockCache.HitsAndMisses)
    1039            1 :         cur = blockCacheInfoTable.Render(cur, table.RenderOptions{
    1040            1 :                 HorizontalDividers: table.MakeHorizontalDividers(0, len(bci)-1),
    1041            1 :         }, bci...)
    1042            1 :         cur = cur.NewlineReturn()
    1043            1 : 
    1044            1 :         if m.BlockCache.Recent[0].Since != 0 {
    1045            0 :                 cur = cur.WriteString("                 miss rate [percentage of total misses] over last ~10m\n") // TODO(radu): print exact timeframe
    1046            0 :                 bci = makeBlockCacheInfo(&m.BlockCache.Recent[0].HitsAndMisses)
    1047            0 :                 cur = blockCacheInfoTable.Render(cur, table.RenderOptions{
    1048            0 :                         HorizontalDividers: table.MakeHorizontalDividers(0, len(bci)-1),
    1049            0 :                 }, bci...)
    1050            0 :                 cur = cur.NewlineReturn()
    1051            0 :         }
    1052              : 
    1053            1 :         if m.BlockCache.Recent[1].Since != 0 {
    1054            0 :                 cur = cur.WriteString("                 miss rate [percentage of total misses] over last ~1h\n") // TODO(radu): print exact timeframe
    1055            0 :                 bci = makeBlockCacheInfo(&m.BlockCache.Recent[1].HitsAndMisses)
    1056            0 :                 cur = blockCacheInfoTable.Render(cur, table.RenderOptions{
    1057            0 :                         HorizontalDividers: table.MakeHorizontalDividers(0, len(bci)-1),
    1058            0 :                 }, bci...)
    1059            0 :                 cur = cur.NewlineReturn()
    1060            0 :         }
    1061              : 
    1062            1 :         iteratorInfoContents := iteratorInfo{
    1063            1 :                 fcEntries:        fmt.Sprintf("%s (%s)", humanizeCount(m.FileCache.TableCount), humanizeBytes(m.FileCache.Size)),
    1064            1 :                 fcHitRate:        fmt.Sprintf("%.1f%%", hitRate(m.FileCache.Hits, m.FileCache.Misses)),
    1065            1 :                 bloomFilterUtil:  fmt.Sprintf("%.1f%%", hitRate(m.Filter.Hits, m.Filter.Misses)),
    1066            1 :                 sstableItersOpen: humanizeCount(m.TableIters),
    1067            1 :                 snapshotsOpen:    humanizeCount(m.Snapshots.Count),
    1068            1 :         }
    1069            1 :         cur = cur.WriteString(iteratorInfoTableTopHeader).NewlineReturn()
    1070            1 :         cur = cur.WriteString(iteratorInfoTableSubHeader).NewlineReturn()
    1071            1 :         cur = iteratorInfoTable.Render(cur, table.RenderOptions{}, iteratorInfoContents)
    1072            1 :         cur = cur.NewlineReturn()
    1073            1 : 
    1074            1 :         status := fmt.Sprintf("%s pending", humanizeCount(m.Table.PendingStatsCollectionCount))
    1075            1 :         if !m.Table.InitialStatsCollectionComplete {
    1076            1 :                 status = "loading"
    1077            1 :         } else if m.Table.PendingStatsCollectionCount == 0 {
    1078            1 :                 status = "all loaded"
    1079            1 :         }
    1080            1 :         tableInfoContents := tableInfo{
    1081            1 :                 stats:   status,
    1082            1 :                 backing: fmt.Sprintf("%s (%s)", humanizeCount(m.Table.BackingTableCount), humanizeBytes(m.Table.BackingTableSize)),
    1083            1 :                 zombie:  fmt.Sprintf("%s (%s local:%s)", humanizeCount(m.Table.ZombieCount), humanizeBytes(m.Table.ZombieSize), humanizeBytes(m.Table.Local.ZombieSize)),
    1084            1 :         }
    1085            1 :         blobInfoContents := blobInfo{
    1086            1 :                 live:       fmt.Sprintf("%s (%s)", humanizeCount(m.BlobFiles.LiveCount), humanizeBytes(m.BlobFiles.LiveSize)),
    1087            1 :                 zombie:     fmt.Sprintf("%s (%s)", humanizeCount(m.BlobFiles.ZombieCount), humanizeBytes(m.BlobFiles.ZombieSize)),
    1088            1 :                 total:      humanizeBytes(m.BlobFiles.ValueSize),
    1089            1 :                 referenced: fmt.Sprintf("%.0f%% (%s)", percent(m.BlobFiles.ReferencedValueSize, m.BlobFiles.ValueSize), humanizeBytes(m.BlobFiles.ReferencedValueSize)),
    1090            1 :         }
    1091            1 :         fileInfoContents := tableAndBlobInfo{
    1092            1 :                 tableInfo: tableInfoContents,
    1093            1 :                 blobInfo:  blobInfoContents,
    1094            1 :         }
    1095            1 :         cur = cur.WriteString(fileInfoTableHeader).NewlineReturn()
    1096            1 :         cur = fileInfoTable.Render(cur, table.RenderOptions{}, fileInfoContents)
    1097            1 :         cur = cur.NewlineReturn()
    1098            1 : 
    1099            1 :         var inUseTotal uint64
    1100            1 :         for i := range m.manualMemory {
    1101            1 :                 inUseTotal += m.manualMemory[i].InUseBytes
    1102            1 :         }
    1103            1 :         inUse := func(purpose manual.Purpose) uint64 {
    1104            1 :                 return m.manualMemory[purpose].InUseBytes
    1105            1 :         }
    1106            1 :         cgoMemInfoContents := cgoMemInfo{
    1107            1 :                 tot: humanizeBytes(inUseTotal),
    1108            1 :                 bcTot: humanizeBytes(inUse(manual.BlockCacheData) +
    1109            1 :                         inUse(manual.BlockCacheMap) + inUse(manual.BlockCacheEntry)),
    1110            1 :                 bcData:       humanizeBytes(inUse(manual.BlockCacheData)),
    1111            1 :                 bcMaps:       humanizeBytes(inUse(manual.BlockCacheMap)),
    1112            1 :                 bcEnts:       humanizeBytes(inUse(manual.BlockCacheEntry)),
    1113            1 :                 memtablesTot: humanizeBytes(inUse(manual.MemTable)),
    1114            1 :         }
    1115            1 :         cur = cur.WriteString(cgoMemInfoTableHeader).NewlineReturn()
    1116            1 :         cur = cgoMemInfoTable.Render(cur, table.RenderOptions{}, cgoMemInfoContents)
    1117            1 :         cur = cur.NewlineReturn()
    1118            1 : 
    1119            1 :         compactionMetricsInfoContents := compactionMetricsInfo{
    1120            1 :                 estimatedDebt: humanizeBytes(m.Compact.EstimatedDebt),
    1121            1 :                 inProgress: fmt.Sprintf("%s (%s)", humanizeCount(m.Compact.NumInProgress),
    1122            1 :                         humanizeBytes(m.Compact.InProgressBytes)),
    1123            1 :                 cancelled: fmt.Sprintf("%s (%s)", humanizeCount(m.Compact.CancelledCount),
    1124            1 :                         humanizeBytes(m.Compact.CancelledBytes)),
    1125            1 :                 failed:       m.Compact.FailedCount,
    1126            1 :                 problemSpans: fmt.Sprintf("%d%s", m.Compact.NumProblemSpans, ifNonZero(m.Compact.NumProblemSpans, "!!")),
    1127            1 :         }
    1128            1 :         cur = cur.WriteString(compactionInfoTableTopHeader).NewlineReturn()
    1129            1 :         cur = compactionInfoTable.Render(cur, table.RenderOptions{}, compactionMetricsInfoContents)
    1130            1 :         cur = cur.NewlineReturn()
    1131            1 : 
    1132            1 :         keysInfoContents := keysInfo{
    1133            1 :                 rangeKeys:          humanizeCount(m.Keys.RangeKeySetsCount),
    1134            1 :                 tombstones:         humanizeCount(m.Keys.TombstoneCount),
    1135            1 :                 missizedTombstones: fmt.Sprintf("%d%s", m.Keys.MissizedTombstonesCount, ifNonZero(m.Keys.MissizedTombstonesCount, "!!")),
    1136            1 :                 pointDels:          humanizeBytes(m.Table.Garbage.PointDeletionsBytesEstimate),
    1137            1 :                 rangeDels:          humanizeBytes(m.Table.Garbage.RangeDeletionsBytesEstimate),
    1138            1 :         }
    1139            1 :         cur = cur.WriteString(keysInfoTableTopHeader).NewlineReturn()
    1140            1 :         cur = keysInfoTable.Render(cur, table.RenderOptions{}, keysInfoContents)
    1141            1 :         cur = cur.NewlineReturn()
    1142            1 : 
    1143            1 :         cur = cur.WriteString(compressionTableHeader).NewlineReturn()
    1144            1 : 
    1145            1 :         compressionContents := []compressionInfo{
    1146            1 :                 {
    1147            1 :                         algorithm: "none",
    1148            1 :                         tables:    humanizeBytesOrEmpty(m.Table.Compression.NoCompressionBytes),
    1149            1 :                         blobFiles: humanizeBytesOrEmpty(m.BlobFiles.Compression.NoCompressionBytes),
    1150            1 :                 },
    1151            1 :                 makeCompressionInfo("snappy", m.Table.Compression.Snappy, m.BlobFiles.Compression.Snappy),
    1152            1 :                 makeCompressionInfo("minlz", m.Table.Compression.MinLZ, m.BlobFiles.Compression.MinLZ),
    1153            1 :                 makeCompressionInfo("zstd", m.Table.Compression.Zstd, m.BlobFiles.Compression.Zstd),
    1154            1 :                 {
    1155            1 :                         algorithm: "unknown",
    1156            1 :                         tables:    humanizeBytesOrEmpty(m.Table.Compression.CompressedBytesWithoutStats),
    1157            1 :                         blobFiles: humanizeBytesOrEmpty(m.BlobFiles.Compression.CompressedBytesWithoutStats),
    1158            1 :                 },
    1159            1 :         }
    1160            1 :         compressionContents = slices.DeleteFunc(compressionContents, func(i compressionInfo) bool {
    1161            1 :                 return i.tables == "" && i.blobFiles == ""
    1162            1 :         })
    1163            1 :         compressionTable.Render(cur, table.RenderOptions{}, compressionContents...)
    1164            1 : 
    1165            1 :         return wb.String()
    1166              : }
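                      : // Editor's usage sketch (not in the original file): assuming an open *DB d,
                      : // whose (*DB).Metrics accessor returns *Metrics:
                      : //
                      : //      m := d.Metrics()
                      : //      fmt.Println(m.String()) // full ASCII report; see testdata/metrics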
    1167              : 
    1168            1 : func ifNonZero[T constraints.Integer](v T, s string) string {
     1169            1 :         if v != 0 {
    1170            1 :                 return s
    1171            1 :         }
    1172            1 :         return ""
    1173              : }
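                      : // Editor's sketch: ifNonZero(2, "!!") == "!!" and ifNonZero(0, "!!") == "";
                      : // String uses it above to flag nonzero problem-span and missized-tombstone
                      : // counts with a trailing "!!".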
    1174              : 
    1175            1 : func hitRate(hits, misses int64) float64 {
    1176            1 :         return percent(hits, hits+misses)
    1177            1 : }
    1178              : 
    1179            1 : func percent[T constraints.Integer](numerator, denominator T) float64 {
    1180            1 :         if denominator == 0 {
    1181            1 :                 return 0
    1182            1 :         }
    1183            1 :         return 100 * float64(numerator) / float64(denominator)
    1184              : }
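                      : // Editor's worked sketch: percent(1, 4) == 25.0, while percent(0, 0) == 0
                      : // rather than NaN, keeping formatted output stable when a denominator such
                      : // as hits+misses is zero.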
    1185              : 
    1186              : // StringForTests is identical to m.String() on 64-bit platforms. It is used to
    1187              : // provide a platform-independent result for tests.
    1188            1 : func (m *Metrics) StringForTests() string {
    1189            1 :         mCopy := *m
    1190            1 : 
    1191            1 :         // We recalculate the file cache size using the 64-bit sizes, and we ignore
    1192            1 :         // the genericcache metadata size which is harder to adjust.
    1193            1 :         const sstableReaderSize64bit = 280
    1194            1 :         const blobFileReaderSize64bit = 112
    1195            1 :         mCopy.FileCache.Size = mCopy.FileCache.TableCount*sstableReaderSize64bit + mCopy.FileCache.BlobFileCount*blobFileReaderSize64bit
    1196            1 :         if math.MaxInt == math.MaxInt64 {
    1197            1 :                 // Verify the 64-bit sizes, so they are kept updated.
    1198            1 :                 if sstableReaderSize64bit != unsafe.Sizeof(sstable.Reader{}) {
    1199            0 :                         panic(fmt.Sprintf("sstableReaderSize64bit should be updated to %d", unsafe.Sizeof(sstable.Reader{})))
    1200              :                 }
    1201            1 :                 if blobFileReaderSize64bit != unsafe.Sizeof(blob.FileReader{}) {
    1202            0 :                         panic(fmt.Sprintf("blobFileReaderSize64bit should be updated to %d", unsafe.Sizeof(blob.FileReader{})))
    1203              :                 }
    1204              :         }
    1205              :         // Don't show cgo memory statistics as they can vary based on architecture,
    1206              :         // invariants tag, etc.
    1207            1 :         mCopy.manualMemory = manual.Metrics{}
    1208            1 : 
    1209            1 :         // Clear the recent block cache stats as they can vary based on timing.
    1210            1 :         for i := range mCopy.BlockCache.Recent {
    1211            1 :                 mCopy.BlockCache.Recent[i].HitsAndMisses = cache.HitsAndMisses{}
    1212            1 :                 mCopy.BlockCache.Recent[i].Since = 0
    1213            1 :         }
    1214            1 :         return redact.StringWithoutMarkers(&mCopy)
    1215              : }
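                      : // Editor's sketch (an assumption about the test harness, not shown in this
                      : // file): a test would compare db.Metrics().StringForTests() against a golden
                      : // file such as testdata/metrics; the pinned 64-bit reader sizes and the
                      : // cleared cgo and recent-cache fields above make that comparison platform-
                      : // and timing-independent.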
    1216              : 
    1217              : // LevelMetricsIter returns an iterator over all level metrics - including the
    1218              : // total for all levels.
    1219            1 : func (m *Metrics) LevelMetricsIter() iter.Seq[*LevelMetrics] {
    1220            1 :         return func(yield func(*LevelMetrics) bool) {
    1221            1 :                 for i := range m.Levels {
    1222            1 :                         lvlMetric := m.Levels[i]
    1223            1 :                         if lvlMetric.Score == 0 {
    1224            1 :                                 lvlMetric.Score = math.NaN()
    1225            1 :                         }
    1226            1 :                         if !yield(&lvlMetric) {
     1227            0 :                                 return // stop: yield returned false and must not be called again
    1228              :                         }
    1229              :                 }
    1230            1 :                 t := m.Total()
    1231            1 :                 t.Score, t.FillFactor, t.CompensatedFillFactor = math.NaN(), math.NaN(), math.NaN()
    1232            1 :                 yield(&t)
    1233              :         }
    1234              : }
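                      : // Editor's usage sketch (assumes a *Metrics value m; iter.Seq works with
                      : // Go 1.23 range-over-func):
                      : //
                      : //      for lm := range m.LevelMetricsIter() {
                      : //              _ = lm.TablesCompacted // per-level rows first, then the totals row
                      : //      }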
    1235              : 
    1236              : // levelMetricsDelta accumulates incremental ("delta") level metric updates
    1237              : // (e.g. from compactions or flushes).
    1238              : type levelMetricsDelta [manifest.NumLevels]*LevelMetrics
    1239              : 
    1240            2 : func (m *levelMetricsDelta) level(level int) *LevelMetrics {
    1241            2 :         if m[level] == nil {
    1242            2 :                 m[level] = &LevelMetrics{}
    1243            2 :         }
    1244            2 :         return m[level]
    1245              : }
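                      : // Editor's sketch: callers touch levels lazily, e.g.
                      : //
                      : //      var delta levelMetricsDelta
                      : //      delta.level(2).TablesCompacted++ // allocates the L2 entry on first use
                      : //
                      : // untouched levels remain nil and updateLevelMetrics below skips them.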
    1246              : 
    1247            2 : func (m *Metrics) updateLevelMetrics(updates levelMetricsDelta) {
    1248            2 :         for i, u := range updates {
    1249            2 :                 if u != nil {
    1250            2 :                         m.Levels[i].Add(u)
    1251            2 :                 }
    1252              :         }
    1253              : }
    1254              : 
    1255            1 : func humanizeCount[T crhumanize.Integer](value T) string {
    1256            1 :         return crhumanize.Count(value, crhumanize.Compact, crhumanize.OmitI).String()
    1257            1 : }
    1258              : 
    1259            1 : func humanizeBytes[T crhumanize.Integer](value T) string {
    1260            1 :         return crhumanize.Bytes(value, crhumanize.Compact, crhumanize.OmitI).String()
    1261            1 : }
    1262              : 
    1263            1 : func humanizeBytesOrEmpty[T crhumanize.Integer](value T) string {
    1264            1 :         if value == 0 {
    1265            1 :                 return ""
    1266            1 :         }
    1267            1 :         return crhumanize.Bytes(value, crhumanize.Compact, crhumanize.OmitI).String()
    1268              : }
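                      : // Editor's note (sketch): humanizeCount and humanizeBytes produce compact
                      : // human-readable strings via crhumanize (64<<10 bytes comes out as something
                      : // like "64KB"; the exact form is crhumanize's), and humanizeBytesOrEmpty maps
                      : // zero to "" so unused cells in the compression table stay blank.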
        

Generated by: LCOV version 2.0-1