// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"fmt"
	"math"
	"time"

	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/cache"
	"github.com/cockroachdb/pebble/internal/humanize"
	"github.com/cockroachdb/pebble/objstorage/objstorageprovider/sharedcache"
	"github.com/cockroachdb/pebble/record"
	"github.com/cockroachdb/pebble/sstable"
	"github.com/cockroachdb/pebble/wal"
	"github.com/cockroachdb/redact"
	"github.com/prometheus/client_golang/prometheus"
)

// CacheMetrics holds metrics for the block and table cache.
type CacheMetrics = cache.Metrics
// FilterMetrics holds metrics for the filter policy.
type FilterMetrics = sstable.FilterMetrics

// ThroughputMetric is a cumulative throughput metric. See the detailed
// comment in base.
type ThroughputMetric = base.ThroughputMetric

// SecondaryCacheMetrics holds metrics for the persistent secondary cache
// that caches commonly accessed blocks from blob storage on a local
// file system.
type SecondaryCacheMetrics = sharedcache.Metrics

// LevelMetrics holds per-level metrics such as the number of files and total
// size of the files, and compaction related metrics.
type LevelMetrics struct {
	// The number of sublevels within the level. The sublevel count corresponds
	// to the read amplification for the level. An empty level will have a
	// sublevel count of 0, implying no read amplification. Only L0 will have
	// a sublevel count other than 0 or 1.
	Sublevels int32
	// The total number of files in the level.
	NumFiles int64
	// The total number of virtual sstables in the level.
	NumVirtualFiles uint64
	// The total size in bytes of the files in the level.
	Size int64
	// The total size of the virtual sstables in the level.
	VirtualSize uint64
	// The level's compaction score. This is the compensatedScoreRatio in the
	// candidateLevelInfo.
	Score float64
	// The number of incoming bytes from other levels read during
	// compactions. This excludes bytes moved and bytes ingested. For L0 this is
	// the bytes written to the WAL.
	BytesIn uint64
	// The number of bytes ingested. The sibling metric for tables is
	// TablesIngested.
	BytesIngested uint64
	// The number of bytes moved into the level by a "move" compaction. The
	// sibling metric for tables is TablesMoved.
	BytesMoved uint64
	// The number of bytes read for compactions at the level. This includes bytes
	// read from other levels (BytesIn), as well as bytes read for the level.
	BytesRead uint64
	// The number of bytes written during compactions. The sibling
	// metric for tables is TablesCompacted. This metric may be summed
	// with BytesFlushed to compute the total bytes written for the level.
	BytesCompacted uint64
	// The number of bytes written during flushes. The sibling
	// metric for tables is TablesFlushed. This metric is always
	// zero for all levels other than L0.
	BytesFlushed uint64
	// The number of sstables compacted to this level.
	TablesCompacted uint64
	// The number of sstables flushed to this level.
	TablesFlushed uint64
	// The number of sstables ingested into the level.
	TablesIngested uint64
	// The number of sstables moved to this level by a "move" compaction.
	TablesMoved uint64

	MultiLevel struct {
		// BytesInTop are the total bytes in a multilevel compaction coming from the top level.
		BytesInTop uint64

		// BytesIn, exclusively for multiLevel compactions.
		BytesIn uint64

		// BytesRead, exclusively for multilevel compactions.
		BytesRead uint64
	}

	// Additional contains misc additional metrics that are not always printed.
	Additional struct {
		// The sum of Properties.ValueBlocksSize for all the sstables in this
		// level. Printed by LevelMetrics.format iff there is at least one level
		// with a non-zero value.
		ValueBlocksSize uint64
		// Cumulative metrics about bytes written to data blocks and value blocks,
		// via compactions (except move compactions) or flushes. Not printed by
		// LevelMetrics.format, but are available to sophisticated clients.
		BytesWrittenDataBlocks  uint64
		BytesWrittenValueBlocks uint64
	}
}

// Add updates the counter metrics for the level.
func (m *LevelMetrics) Add(u *LevelMetrics) {
	m.NumFiles += u.NumFiles
	m.NumVirtualFiles += u.NumVirtualFiles
	m.VirtualSize += u.VirtualSize
	m.Size += u.Size
	m.BytesIn += u.BytesIn
	m.BytesIngested += u.BytesIngested
	m.BytesMoved += u.BytesMoved
	m.BytesRead += u.BytesRead
	m.BytesCompacted += u.BytesCompacted
	m.BytesFlushed += u.BytesFlushed
	m.TablesCompacted += u.TablesCompacted
	m.TablesFlushed += u.TablesFlushed
	m.TablesIngested += u.TablesIngested
	m.TablesMoved += u.TablesMoved
	m.MultiLevel.BytesInTop += u.MultiLevel.BytesInTop
	m.MultiLevel.BytesRead += u.MultiLevel.BytesRead
	m.MultiLevel.BytesIn += u.MultiLevel.BytesIn
	m.Additional.BytesWrittenDataBlocks += u.Additional.BytesWrittenDataBlocks
	m.Additional.BytesWrittenValueBlocks += u.Additional.BytesWrittenValueBlocks
	m.Additional.ValueBlocksSize += u.Additional.ValueBlocksSize
}

// WriteAmp computes the write amplification for compactions at this
// level. Computed as (BytesFlushed + BytesCompacted) / BytesIn.
func (m *LevelMetrics) WriteAmp() float64 {
	if m.BytesIn == 0 {
		return 0
	}
	return float64(m.BytesFlushed+m.BytesCompacted) / float64(m.BytesIn)
}
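
// A worked example (illustrative, not part of the original file): with
// BytesIn = 1 GiB and BytesFlushed+BytesCompacted = 3 GiB, WriteAmp reports
// 3.0, i.e. each incoming byte was rewritten three times at this level:
//
//	var lm LevelMetrics
//	lm.BytesIn = 1 << 30
//	lm.BytesCompacted = 3 << 30
//	fmt.Printf("w-amp: %.1f\n", lm.WriteAmp()) // w-amp: 3.0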

// Metrics holds metrics for various subsystems of the DB such as the Cache,
// Compactions, WAL, and per-Level metrics.
//
// TODO(peter): The testing of these metrics is relatively weak. There should
// be testing that performs various operations on a DB and verifies that the
// metrics reflect those operations.
type Metrics struct {
	BlockCache CacheMetrics

	Compact struct {
		// The total number of compactions, and per-compaction type counts.
		Count                 int64
		DefaultCount          int64
		DeleteOnlyCount       int64
		ElisionOnlyCount      int64
		CopyCount             int64
		MoveCount             int64
		ReadCount             int64
		TombstoneDensityCount int64
		RewriteCount          int64
		MultiLevelCount       int64
		CounterLevelCount     int64
		// An estimate of the number of bytes that need to be compacted for the LSM
		// to reach a stable state.
		EstimatedDebt uint64
		// Number of bytes present in sstables being written by in-progress
		// compactions. This value will be zero if there are no in-progress
		// compactions.
		InProgressBytes int64
		// Number of compactions that are in-progress.
		NumInProgress int64
		// MarkedFiles is a count of files that are marked for
		// compaction. Such files are compacted in a rewrite compaction
		// when no other compactions are picked.
		MarkedFiles int
		// Duration records the cumulative duration of all compactions since the
		// database was opened.
		Duration time.Duration
	}

	Ingest struct {
		// The total number of ingestions.
		Count uint64
	}

	Flush struct {
		// The total number of flushes.
		Count           int64
		WriteThroughput ThroughputMetric
		// Number of flushes that are in-progress. In the current implementation
		// this will always be zero or one.
		NumInProgress int64
		// AsIngestCount is a monotonically increasing counter of flush operations
		// handling ingested tables.
		AsIngestCount uint64
		// AsIngestTableCount is a monotonically increasing counter of tables
		// ingested as flushables.
		AsIngestTableCount uint64
		// AsIngestBytes is a monotonically increasing counter of the bytes flushed
		// for flushables that originated as ingestion operations.
		AsIngestBytes uint64
	}

	Filter FilterMetrics

	Levels [numLevels]LevelMetrics

	MemTable struct {
		// The number of bytes allocated by memtables and large (flushable)
		// batches.
		Size uint64
		// The count of memtables.
		Count int64
		// The number of bytes present in zombie memtables which are no longer
		// referenced by the current DB state. An unbounded number of memtables
		// may be zombie if they're still in use by an iterator. One additional
		// memtable may be zombie if it's no longer in use and waiting to be
		// recycled.
		ZombieSize uint64
		// The count of zombie memtables.
		ZombieCount int64
	}

	Keys struct {
		// The approximate count of internal range key set keys in the database.
		RangeKeySetsCount uint64
		// The approximate count of internal tombstones (DEL, SINGLEDEL and
		// RANGEDEL key kinds) within the database.
		TombstoneCount uint64
		// A cumulative total number of missized DELSIZED keys encountered by
		// compactions since the database was opened.
		MissizedTombstonesCount uint64
	}

	Snapshots struct {
		// The number of currently open snapshots.
		Count int
		// The sequence number of the earliest, currently open snapshot.
		EarliestSeqNum base.SeqNum
		// A running tally of keys written to sstables during flushes or
		// compactions that would've been elided if it weren't for open
		// snapshots.
		PinnedKeys uint64
		// A running cumulative sum of the size of keys and values written to
		// sstables during flushes or compactions that would've been elided if
		// it weren't for open snapshots.
		PinnedSize uint64
	}

	Table struct {
		// The number of bytes present in obsolete tables which are no longer
		// referenced by the current DB state or any open iterators.
		ObsoleteSize uint64
		// The count of obsolete tables.
		ObsoleteCount int64
		// The number of bytes present in zombie tables which are no longer
		// referenced by the current DB state but are still in use by an iterator.
		ZombieSize uint64
		// The count of zombie tables.
		ZombieCount int64
		// The count of sstables backing virtual tables.
		BackingTableCount uint64
		// The sum of the sizes of the BackingTableCount sstables that are backing virtual tables.
		BackingTableSize uint64
		// The number of sstables that are compressed with an unknown compression
		// algorithm.
		CompressedCountUnknown int64
		// The number of sstables that are compressed with the default compression
		// algorithm, snappy.
		CompressedCountSnappy int64
		// The number of sstables that are compressed with zstd.
		CompressedCountZstd int64
		// The number of sstables that are uncompressed.
		CompressedCountNone int64

		// Local file sizes.
		Local struct {
			// LiveSize is the number of bytes in live tables.
			LiveSize uint64
			// ObsoleteSize is the number of bytes in obsolete tables.
			ObsoleteSize uint64
			// ZombieSize is the number of bytes in zombie tables.
			ZombieSize uint64
		}
	}

	TableCache CacheMetrics

	// Count of the number of open sstable iterators.
	TableIters int64
	// Uptime is the total time since this DB was opened.
	Uptime time.Duration

	WAL struct {
		// Number of live WAL files.
		Files int64
		// Number of obsolete WAL files.
		ObsoleteFiles int64
		// Physical size of the obsolete WAL files.
		ObsoletePhysicalSize uint64
		// Size of the live data in the WAL files. Note that with WAL file
		// recycling this is less than the actual on-disk size of the WAL files.
		Size uint64
		// Physical size of the WAL files on-disk. With WAL file recycling,
		// this is greater than the live data in WAL files.
		//
		// TODO(sumeer): it seems this does not include ObsoletePhysicalSize.
		// Should the comment be updated?
		PhysicalSize uint64
		// Number of logical bytes written to the WAL.
		BytesIn uint64
		// Number of bytes written to the WAL.
		BytesWritten uint64
		// Failover contains failover stats. Empty if failover is not enabled.
		Failover wal.FailoverStats
	}

	LogWriter struct {
		FsyncLatency prometheus.Histogram
		record.LogWriterMetrics
	}

	CategoryStats []sstable.CategoryStatsAggregate

	SecondaryCacheMetrics SecondaryCacheMetrics

	private struct {
		optionsFileSize  uint64
		manifestFileSize uint64
	}
}
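
// A minimal usage sketch (illustrative, not part of the original file):
// clients obtain a point-in-time snapshot of these metrics via the DB's
// Metrics method and read fields directly:
//
//	m := db.Metrics()
//	fmt.Printf("read-amp: %d\n", m.ReadAmp())
//	fmt.Printf("L0 files: %d\n", m.Levels[0].NumFiles)
//	fmt.Printf("compactions: %d\n", m.Compact.Count)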

var (
	// FsyncLatencyBuckets are prometheus histogram buckets suitable for a histogram
	// that records latencies for fsyncs.
	FsyncLatencyBuckets = append(
		prometheus.LinearBuckets(0.0, float64(time.Microsecond*100), 50),
		prometheus.ExponentialBucketsRange(float64(time.Millisecond*5), float64(10*time.Second), 50)...,
	)

	// SecondaryCacheIOBuckets is exported from package pebble so that users
	// (e.g. CRDB) can export metrics using these buckets.
	SecondaryCacheIOBuckets = sharedcache.IOBuckets
	// SecondaryCacheChannelWriteBuckets is exported from package pebble so
	// that users (e.g. CRDB) can export metrics using these buckets.
	SecondaryCacheChannelWriteBuckets = sharedcache.ChannelWriteBuckets
)
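
// A hedged sketch (illustrative, not part of the original file) of building
// the histogram that Metrics.LogWriter.FsyncLatency expects, using the
// standard prometheus client constructor. Note the bucket boundaries above
// are float64 conversions of time.Duration values, i.e. nanoseconds; the
// metric name below is hypothetical:
//
//	fsyncLatency := prometheus.NewHistogram(prometheus.HistogramOpts{
//		Name:    "pebble_wal_fsync_latency", // hypothetical name; values in nanoseconds
//		Buckets: FsyncLatencyBuckets,
//	})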

// DiskSpaceUsage returns the total disk space used by the database in bytes,
// including live and obsolete files. This only includes local files, i.e.,
// remote files (as known to objstorage.Provider) are not included.
func (m *Metrics) DiskSpaceUsage() uint64 {
	var usageBytes uint64
	usageBytes += m.WAL.PhysicalSize
	usageBytes += m.WAL.ObsoletePhysicalSize
	usageBytes += m.Table.Local.LiveSize
	usageBytes += m.Table.Local.ObsoleteSize
	usageBytes += m.Table.Local.ZombieSize
	usageBytes += m.private.optionsFileSize
	usageBytes += m.private.manifestFileSize
	// TODO(sumeer): InProgressBytes does not distinguish between local and
	// remote files. This causes a small error. Fix.
	usageBytes += uint64(m.Compact.InProgressBytes)
	return usageBytes
}
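
// For example (illustrative, not part of the original file): a capacity
// watchdog could poll this value, keeping in mind that remote (shared)
// sstables are excluded. The softLimit threshold and pauseIngest reaction
// below are hypothetical:
//
//	if usage := db.Metrics().DiskSpaceUsage(); usage > softLimit {
//		pauseIngest()
//	}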

// NumVirtual is the number of virtual sstables in the latest version,
// summed over every level in the LSM.
func (m *Metrics) NumVirtual() uint64 {
	var n uint64
	for _, level := range m.Levels {
		n += level.NumVirtualFiles
	}
	return n
}

// VirtualSize is the sum of the sizes of the virtual sstables in the
// latest version. BackingTableSize - VirtualSize gives an estimate for
// the space amplification caused by not compacting virtual sstables.
func (m *Metrics) VirtualSize() uint64 {
	var size uint64
	for _, level := range m.Levels {
		size += level.VirtualSize
	}
	return size
}

// ReadAmp returns the current read amplification of the database.
// It's computed as the number of sublevels in L0 + the number of non-empty
// levels below L0.
func (m *Metrics) ReadAmp() int {
	var ramp int32
	for _, l := range m.Levels {
		ramp += l.Sublevels
	}
	return int(ramp)
}
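
// A worked example (illustrative, not part of the original file): an LSM
// with 4 sublevels in L0 and non-empty L4, L5, and L6 (one sublevel each)
// has a read amplification of 4 + 3 = 7, since a point lookup may need to
// consult each L0 sublevel and each non-empty lower level.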

// Total returns the sum of the per-level metrics and WAL metrics.
func (m *Metrics) Total() LevelMetrics {
	var total LevelMetrics
	for level := 0; level < numLevels; level++ {
		l := &m.Levels[level]
		total.Add(l)
		total.Sublevels += l.Sublevels
	}
	// Compute total bytes-in as the bytes written to the WAL + bytes ingested.
	total.BytesIn = m.WAL.BytesWritten + total.BytesIngested
	// Add the total bytes-in to the total bytes-flushed. This is to account for
	// the bytes written to the log and bytes written externally and then
	// ingested.
	total.BytesFlushed += total.BytesIn
	return total
}
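
// A minimal sketch (illustrative, not part of the original file): the
// overall write amplification reported in the "total" row of String's
// output can be derived from Total:
//
//	total := db.Metrics().Total()
//	fmt.Printf("total w-amp: %.1f\n", total.WriteAmp())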

// String pretty-prints the metrics as below:
//
//	      |                             |       |       |   ingested   |     moved    |    written   |       |    amp
//	level | tables  size val-bl vtables | score |   in  | tables  size | tables  size | tables  size |  read |   r   w
//	------+-----------------------------+-------+-------+--------------+--------------+--------------+-------+---------
//	    0 |   101   102B     0B       0 | 103.0 |  104B |   112   104B |   113   106B |   221   217B |  107B |   1  2.1
//	    1 |   201   202B     0B       0 | 203.0 |  204B |   212   204B |   213   206B |   421   417B |  207B |   2  2.0
//	    2 |   301   302B     0B       0 | 303.0 |  304B |   312   304B |   313   306B |   621   617B |  307B |   3  2.0
//	    3 |   401   402B     0B       0 | 403.0 |  404B |   412   404B |   413   406B |   821   817B |  407B |   4  2.0
//	    4 |   501   502B     0B       0 | 503.0 |  504B |   512   504B |   513   506B |  1.0K  1017B |  507B |   5  2.0
//	    5 |   601   602B     0B       0 | 603.0 |  604B |   612   604B |   613   606B |  1.2K  1.2KB |  607B |   6  2.0
//	    6 |   701   702B     0B       0 |     - |  704B |   712   704B |   713   706B |  1.4K  1.4KB |  707B |   7  2.0
//	total |  2.8K  2.7KB     0B       0 |     - | 2.8KB |  2.9K  2.8KB |  2.9K  2.8KB |  5.7K  8.4KB | 2.8KB |  28  3.0
//	-------------------------------------------------------------------------------------------------------------------
//	WAL: 22 files (24B) in: 25B written: 26B (4% overhead)
//	Flushes: 8
//	Compactions: 5 estimated debt: 6B in progress: 2 (7B)
//	 default: 27 delete: 28 elision: 29 move: 30 read: 31 rewrite: 32 multi-level: 33
//	MemTables: 12 (11B) zombie: 14 (13B)
//	Zombie tables: 16 (15B)
//	Backing tables: 0 (0B)
//	Block cache: 2 entries (1B) hit rate: 42.9%
//	Table cache: 18 entries (17B) hit rate: 48.7%
//	Secondary cache: 40 entries (40B) hit rate: 49.9%
//	Snapshots: 4 earliest seq num: 1024
//	Table iters: 21
//	Filter utility: 47.4%
//	Ingestions: 27 as flushable: 36 (34B in 35 tables)
func (m *Metrics) String() string {
	return redact.StringWithoutMarkers(m)
}

var _ redact.SafeFormatter = &Metrics{}

// SafeFormat implements redact.SafeFormatter.
func (m *Metrics) SafeFormat(w redact.SafePrinter, _ rune) {
	// NB: Pebble does not make any assumptions as to which Go primitive types
	// have been registered as safe with redact.RegisterSafeType and does not
	// register any types itself. Some of the calls to `redact.Safe`, etc are
	// superfluous in the context of CockroachDB, which registers all the Go
	// numeric types as safe.

	// TODO(jackson): There are a few places where we use redact.SafeValue
	// instead of redact.RedactableString. This is necessary because of a bug
	// whereby formatting a redact.RedactableString argument does not respect
	// width specifiers. When the issue is fixed, we can convert these to
	// RedactableStrings. https://github.com/cockroachdb/redact/issues/17

	multiExists := m.Compact.MultiLevelCount > 0
	appendIfMulti := func(line redact.SafeString) {
		if multiExists {
			w.SafeString(line)
		}
	}
	newline := func() {
		w.SafeString("\n")
	}

	w.SafeString("      |                             |       |       |   ingested   |     moved    |    written   |       |    amp")
	appendIfMulti(" |    multilevel")
	newline()
	w.SafeString("level | tables  size val-bl vtables | score |   in  | tables  size | tables  size | tables  size |  read |   r   w")
	appendIfMulti(" |   top    in  read")
	newline()
	w.SafeString("------+-----------------------------+-------+-------+--------------+--------------+--------------+-------+---------")
	appendIfMulti("-+------------------")
	newline()

	// formatRow prints out a row of the table.
	formatRow := func(m *LevelMetrics, score float64) {
		scoreStr := "-"
		if !math.IsNaN(score) {
			// Try to keep the string no longer than 5 characters.
			switch {
			case score < 99.995:
				scoreStr = fmt.Sprintf("%.2f", score)
			case score < 999.95:
				scoreStr = fmt.Sprintf("%.1f", score)
			default:
				scoreStr = fmt.Sprintf("%.0f", score)
			}
		}
		var wampStr string
		if wamp := m.WriteAmp(); wamp > 99.5 {
			wampStr = fmt.Sprintf("%.0f", wamp)
		} else {
			wampStr = fmt.Sprintf("%.1f", wamp)
		}

		w.Printf("| %5s %6s %6s %7s | %5s | %5s | %5s %6s | %5s %6s | %5s %6s | %5s | %3d %4s",
			humanize.Count.Int64(m.NumFiles),
			humanize.Bytes.Int64(m.Size),
			humanize.Bytes.Uint64(m.Additional.ValueBlocksSize),
			humanize.Count.Uint64(m.NumVirtualFiles),
			redact.Safe(scoreStr),
			humanize.Bytes.Uint64(m.BytesIn),
			humanize.Count.Uint64(m.TablesIngested),
			humanize.Bytes.Uint64(m.BytesIngested),
			humanize.Count.Uint64(m.TablesMoved),
			humanize.Bytes.Uint64(m.BytesMoved),
			humanize.Count.Uint64(m.TablesFlushed+m.TablesCompacted),
			humanize.Bytes.Uint64(m.BytesFlushed+m.BytesCompacted),
			humanize.Bytes.Uint64(m.BytesRead),
			redact.Safe(m.Sublevels),
			redact.Safe(wampStr))

		if multiExists {
			w.Printf(" | %5s %5s %5s",
				humanize.Bytes.Uint64(m.MultiLevel.BytesInTop),
				humanize.Bytes.Uint64(m.MultiLevel.BytesIn),
				humanize.Bytes.Uint64(m.MultiLevel.BytesRead))
		}
		newline()
	}

	var total LevelMetrics
	for level := 0; level < numLevels; level++ {
		l := &m.Levels[level]
		w.Printf("%5d ", redact.Safe(level))

		// Format the score.
		score := math.NaN()
		if level < numLevels-1 {
			score = l.Score
		}
		formatRow(l, score)
		total.Add(l)
		total.Sublevels += l.Sublevels
	}
	// Compute total bytes-in as the bytes written to the WAL + bytes ingested.
	total.BytesIn = m.WAL.BytesWritten + total.BytesIngested
	// Add the total bytes-in to the total bytes-flushed. This is to account for
	// the bytes written to the log and bytes written externally and then
	// ingested.
	total.BytesFlushed += total.BytesIn
	w.SafeString("total ")
	formatRow(&total, math.NaN())

	w.SafeString("-------------------------------------------------------------------------------------------------------------------")
	appendIfMulti("--------------------")
	newline()
	w.Printf("WAL: %d files (%s) in: %s written: %s (%.0f%% overhead)",
		redact.Safe(m.WAL.Files),
		humanize.Bytes.Uint64(m.WAL.Size),
		humanize.Bytes.Uint64(m.WAL.BytesIn),
		humanize.Bytes.Uint64(m.WAL.BytesWritten),
		redact.Safe(percent(int64(m.WAL.BytesWritten)-int64(m.WAL.BytesIn), int64(m.WAL.BytesIn))))
	failoverStats := m.WAL.Failover
	failoverStats.FailoverWriteAndSyncLatency = nil
	if failoverStats == (wal.FailoverStats{}) {
		w.Printf("\n")
	} else {
		w.Printf(" failover: (switches: %d, primary: %s, secondary: %s)\n", m.WAL.Failover.DirSwitchCount,
			m.WAL.Failover.PrimaryWriteDuration.String(), m.WAL.Failover.SecondaryWriteDuration.String())
	}

	w.Printf("Flushes: %d\n", redact.Safe(m.Flush.Count))

	w.Printf("Compactions: %d estimated debt: %s in progress: %d (%s)\n",
		redact.Safe(m.Compact.Count),
		humanize.Bytes.Uint64(m.Compact.EstimatedDebt),
		redact.Safe(m.Compact.NumInProgress),
		humanize.Bytes.Int64(m.Compact.InProgressBytes))

	w.Printf(" default: %d delete: %d elision: %d move: %d read: %d tombstone-density: %d rewrite: %d copy: %d multi-level: %d\n",
		redact.Safe(m.Compact.DefaultCount),
		redact.Safe(m.Compact.DeleteOnlyCount),
		redact.Safe(m.Compact.ElisionOnlyCount),
		redact.Safe(m.Compact.MoveCount),
		redact.Safe(m.Compact.ReadCount),
		redact.Safe(m.Compact.TombstoneDensityCount),
		redact.Safe(m.Compact.RewriteCount),
		redact.Safe(m.Compact.CopyCount),
		redact.Safe(m.Compact.MultiLevelCount))

	w.Printf("MemTables: %d (%s) zombie: %d (%s)\n",
		redact.Safe(m.MemTable.Count),
		humanize.Bytes.Uint64(m.MemTable.Size),
		redact.Safe(m.MemTable.ZombieCount),
		humanize.Bytes.Uint64(m.MemTable.ZombieSize))

	w.Printf("Zombie tables: %d (%s, local: %s)\n",
		redact.Safe(m.Table.ZombieCount),
		humanize.Bytes.Uint64(m.Table.ZombieSize),
		humanize.Bytes.Uint64(m.Table.Local.ZombieSize))

	w.Printf("Backing tables: %d (%s)\n",
		redact.Safe(m.Table.BackingTableCount),
		humanize.Bytes.Uint64(m.Table.BackingTableSize))
	w.Printf("Virtual tables: %d (%s)\n",
		redact.Safe(m.NumVirtual()),
		humanize.Bytes.Uint64(m.VirtualSize()))
	w.Printf("Local tables size: %s\n", humanize.Bytes.Uint64(m.Table.Local.LiveSize))
	w.SafeString("Compression types:")
	if count := m.Table.CompressedCountSnappy; count > 0 {
		w.Printf(" snappy: %d", redact.Safe(count))
	}
	if count := m.Table.CompressedCountZstd; count > 0 {
		w.Printf(" zstd: %d", redact.Safe(count))
	}
	if count := m.Table.CompressedCountNone; count > 0 {
		w.Printf(" none: %d", redact.Safe(count))
	}
	if count := m.Table.CompressedCountUnknown; count > 0 {
		w.Printf(" unknown: %d", redact.Safe(count))
	}
	w.Print("\n")

	formatCacheMetrics := func(m *CacheMetrics, name redact.SafeString) {
		w.Printf("%s: %s entries (%s) hit rate: %.1f%%\n",
			name,
			humanize.Count.Int64(m.Count),
			humanize.Bytes.Int64(m.Size),
			redact.Safe(hitRate(m.Hits, m.Misses)))
	}
	formatCacheMetrics(&m.BlockCache, "Block cache")
	formatCacheMetrics(&m.TableCache, "Table cache")

	formatSharedCacheMetrics := func(w redact.SafePrinter, m *SecondaryCacheMetrics, name redact.SafeString) {
		w.Printf("%s: %s entries (%s) hit rate: %.1f%%\n",
			name,
			humanize.Count.Int64(m.Count),
			humanize.Bytes.Int64(m.Size),
			redact.Safe(hitRate(m.ReadsWithFullHit, m.ReadsWithPartialHit+m.ReadsWithNoHit)))
	}
	formatSharedCacheMetrics(w, &m.SecondaryCacheMetrics, "Secondary cache")

	w.Printf("Snapshots: %d earliest seq num: %d\n",
		redact.Safe(m.Snapshots.Count),
		redact.Safe(m.Snapshots.EarliestSeqNum))

	w.Printf("Table iters: %d\n", redact.Safe(m.TableIters))
	w.Printf("Filter utility: %.1f%%\n", redact.Safe(hitRate(m.Filter.Hits, m.Filter.Misses)))
	w.Printf("Ingestions: %d as flushable: %d (%s in %d tables)\n",
		redact.Safe(m.Ingest.Count),
		redact.Safe(m.Flush.AsIngestCount),
		humanize.Bytes.Uint64(m.Flush.AsIngestBytes),
		redact.Safe(m.Flush.AsIngestTableCount))
}

func hitRate(hits, misses int64) float64 {
	return percent(hits, hits+misses)
}

func percent(numerator, denominator int64) float64 {
	if denominator == 0 {
		return 0
	}
	return 100 * float64(numerator) / float64(denominator)
}

// StringForTests is identical to m.String() on 64-bit platforms. It is used to
// provide a platform-independent result for tests.
func (m *Metrics) StringForTests() string {
	mCopy := *m
	if math.MaxInt == math.MaxInt32 {
		// This is the difference in Sizeof(sstable.Reader{}) between 64 and 32
		// bit platforms.
		const tableCacheSizeAdjustment = 212
		mCopy.TableCache.Size += mCopy.TableCache.Count * tableCacheSizeAdjustment
	}
	return redact.StringWithoutMarkers(&mCopy)
}