// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"fmt"
	"math"
	"time"

	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/cache"
	"github.com/cockroachdb/pebble/internal/humanize"
	"github.com/cockroachdb/pebble/objstorage/objstorageprovider/sharedcache"
	"github.com/cockroachdb/pebble/record"
	"github.com/cockroachdb/pebble/sstable"
	"github.com/cockroachdb/pebble/wal"
	"github.com/cockroachdb/redact"
	"github.com/prometheus/client_golang/prometheus"
)

// CacheMetrics holds metrics for the block and table cache.
type CacheMetrics = cache.Metrics

// FilterMetrics holds metrics for the filter policy.
type FilterMetrics = sstable.FilterMetrics

// ThroughputMetric is a cumulative throughput metric. See the detailed
// comment in base.
type ThroughputMetric = base.ThroughputMetric

// SecondaryCacheMetrics holds metrics for the persistent secondary cache
// that caches commonly accessed blocks from blob storage on a local
// file system.
type SecondaryCacheMetrics = sharedcache.Metrics

// LevelMetrics holds per-level metrics, such as the number of files and total
// size of the files, as well as compaction-related metrics.
type LevelMetrics struct {
	// The number of sublevels within the level. The sublevel count corresponds
	// to the read amplification for the level. An empty level will have a
	// sublevel count of 0, implying no read amplification. Only L0 will have
	// a sublevel count other than 0 or 1.
	Sublevels int32
	// The total number of files in the level.
	NumFiles int64
	// The total number of virtual sstables in the level.
	NumVirtualFiles uint64
	// The total size in bytes of the files in the level.
	Size int64
	// The total size of the virtual sstables in the level.
	VirtualSize uint64
	// The level's compaction score. This is the compensatedScoreRatio in the
	// candidateLevelInfo.
	Score float64
	// The number of incoming bytes from other levels read during
	// compactions. This excludes bytes moved and bytes ingested. For L0 this is
	// the bytes written to the WAL.
	BytesIn uint64
	// The number of bytes ingested. The sibling metric for tables is
	// TablesIngested.
	BytesIngested uint64
	// The number of bytes moved into the level by a "move" compaction. The
	// sibling metric for tables is TablesMoved.
	BytesMoved uint64
	// The number of bytes read for compactions at the level. This includes bytes
	// read from other levels (BytesIn), as well as bytes read for the level.
	BytesRead uint64
	// The number of bytes written during compactions. The sibling metric for
	// tables is TablesCompacted. This metric may be summed with BytesFlushed
	// to compute the total bytes written for the level.
	BytesCompacted uint64
	// The number of bytes written during flushes. The sibling metric for
	// tables is TablesFlushed. This metric is always zero for all levels
	// other than L0.
	BytesFlushed uint64
	// The number of sstables compacted to this level.
	TablesCompacted uint64
	// The number of sstables flushed to this level.
	TablesFlushed uint64
	// The number of sstables ingested into the level.
	TablesIngested uint64
	// The number of sstables moved to this level by a "move" compaction.
	TablesMoved uint64

	MultiLevel struct {
		// BytesInTop are the total bytes in a multilevel compaction coming
		// from the top level.
		BytesInTop uint64

		// BytesIn, exclusively for multilevel compactions.
		BytesIn uint64

		// BytesRead, exclusively for multilevel compactions.
		BytesRead uint64
	}

	// Additional contains miscellaneous additional metrics that are not
	// always printed.
	Additional struct {
		// The sum of Properties.ValueBlocksSize for all the sstables in this
		// level. Printed by LevelMetrics.format iff there is at least one level
		// with a non-zero value.
		ValueBlocksSize uint64
		// Cumulative metrics about bytes written to data blocks and value blocks,
		// via compactions (except move compactions) or flushes. Not printed by
		// LevelMetrics.format, but are available to sophisticated clients.
		BytesWrittenDataBlocks  uint64
		BytesWrittenValueBlocks uint64
	}
}
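
// Illustrative usage (not part of the original file): per-level metrics are
// exposed through Metrics.Levels (defined below). Assuming a hypothetical
// *pebble.DB named db:
//
//	l0 := db.Metrics().Levels[0]
//	fmt.Printf("L0: %d files, %d sublevels\n", l0.NumFiles, l0.Sublevels)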

// Add updates the counter metrics for the level.
func (m *LevelMetrics) Add(u *LevelMetrics) {
	m.NumFiles += u.NumFiles
	m.NumVirtualFiles += u.NumVirtualFiles
	m.VirtualSize += u.VirtualSize
	m.Size += u.Size
	m.BytesIn += u.BytesIn
	m.BytesIngested += u.BytesIngested
	m.BytesMoved += u.BytesMoved
	m.BytesRead += u.BytesRead
	m.BytesCompacted += u.BytesCompacted
	m.BytesFlushed += u.BytesFlushed
	m.TablesCompacted += u.TablesCompacted
	m.TablesFlushed += u.TablesFlushed
	m.TablesIngested += u.TablesIngested
	m.TablesMoved += u.TablesMoved
	m.MultiLevel.BytesInTop += u.MultiLevel.BytesInTop
	m.MultiLevel.BytesRead += u.MultiLevel.BytesRead
	m.MultiLevel.BytesIn += u.MultiLevel.BytesIn
	m.Additional.BytesWrittenDataBlocks += u.Additional.BytesWrittenDataBlocks
	m.Additional.BytesWrittenValueBlocks += u.Additional.BytesWrittenValueBlocks
	m.Additional.ValueBlocksSize += u.Additional.ValueBlocksSize
}

// WriteAmp computes the write amplification for compactions at this
// level. Computed as (BytesFlushed + BytesCompacted) / BytesIn.
func (m *LevelMetrics) WriteAmp() float64 {
	if m.BytesIn == 0 {
		return 0
	}
	return float64(m.BytesFlushed+m.BytesCompacted) / float64(m.BytesIn)
}
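
// Worked example (illustrative, not part of the original file): a level that
// received 10 MiB from the level above and wrote 25 MiB of compacted output
// has a write amplification of 25/10 = 2.5:
//
//	lm := LevelMetrics{BytesIn: 10 << 20, BytesCompacted: 25 << 20}
//	_ = lm.WriteAmp() // 2.5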

// Metrics holds metrics for various subsystems of the DB such as the Cache,
// Compactions, WAL, and per-Level metrics.
//
// TODO(peter): The testing of these metrics is relatively weak. There should
// be testing that performs various operations on a DB and verifies that the
// metrics reflect those operations.
type Metrics struct {
	BlockCache CacheMetrics

	Compact struct {
		// The total number of compactions, and per-compaction type counts.
		Count             int64
		DefaultCount      int64
		DeleteOnlyCount   int64
		ElisionOnlyCount  int64
		MoveCount         int64
		ReadCount         int64
		RewriteCount      int64
		MultiLevelCount   int64
		CounterLevelCount int64
		// An estimate of the number of bytes that need to be compacted for the LSM
		// to reach a stable state.
		EstimatedDebt uint64
		// Number of bytes present in sstables being written by in-progress
		// compactions. This value will be zero if there are no in-progress
		// compactions.
		InProgressBytes int64
		// Number of compactions that are in-progress.
		NumInProgress int64
		// MarkedFiles is a count of files that are marked for
		// compaction. Such files are compacted in a rewrite compaction
		// when no other compactions are picked.
		MarkedFiles int
		// Duration records the cumulative duration of all compactions since the
		// database was opened.
		Duration time.Duration
	}

	Ingest struct {
		// The total number of ingestions.
		Count uint64
	}

	Flush struct {
		// The total number of flushes.
		Count           int64
		WriteThroughput ThroughputMetric
		// Number of flushes that are in-progress. In the current implementation
		// this will always be zero or one.
		NumInProgress int64
		// AsIngestCount is a monotonically increasing counter of flush operations
		// handling ingested tables.
		AsIngestCount uint64
		// AsIngestTableCount is a monotonically increasing counter of tables
		// ingested as flushables.
		AsIngestTableCount uint64
		// AsIngestBytes is a monotonically increasing counter of the bytes flushed
		// for flushables that originated as ingestion operations.
		AsIngestBytes uint64
	}

	Filter FilterMetrics

	Levels [numLevels]LevelMetrics

	MemTable struct {
		// The number of bytes allocated by memtables and large (flushable)
		// batches.
		Size uint64
		// The count of memtables.
		Count int64
		// The number of bytes present in zombie memtables which are no longer
		// referenced by the current DB state. An unbounded number of memtables
		// may be zombie if they're still in use by an iterator. One additional
		// memtable may be zombie if it's no longer in use and waiting to be
		// recycled.
		ZombieSize uint64
		// The count of zombie memtables.
		ZombieCount int64
	}

	Keys struct {
		// The approximate count of internal range key set keys in the database.
		RangeKeySetsCount uint64
		// The approximate count of internal tombstones (DEL, SINGLEDEL and
		// RANGEDEL key kinds) within the database.
		TombstoneCount uint64
		// A cumulative total number of missized DELSIZED keys encountered by
		// compactions since the database was opened.
		MissizedTombstonesCount uint64
	}

	Snapshots struct {
		// The number of currently open snapshots.
		Count int
		// The sequence number of the earliest, currently open snapshot.
		EarliestSeqNum uint64
		// A running tally of keys written to sstables during flushes or
		// compactions that would've been elided if it weren't for open
		// snapshots.
		PinnedKeys uint64
		// A running cumulative sum of the size of keys and values written to
		// sstables during flushes or compactions that would've been elided if
		// it weren't for open snapshots.
		PinnedSize uint64
	}

	Table struct {
		// The number of bytes present in obsolete tables which are no longer
		// referenced by the current DB state or any open iterators.
		ObsoleteSize uint64
		// The count of obsolete tables.
		ObsoleteCount int64
		// The number of bytes present in zombie tables which are no longer
		// referenced by the current DB state but are still in use by an iterator.
		ZombieSize uint64
		// The count of zombie tables.
		ZombieCount int64
		// The count of sstables backing virtual tables.
		BackingTableCount uint64
		// The sum of the sizes of the BackingTableCount sstables that are
		// backing virtual tables.
		BackingTableSize uint64

		// Local file sizes.
		Local struct {
			// LiveSize is the number of bytes in live tables.
			LiveSize uint64
			// ObsoleteSize is the number of bytes in obsolete tables.
			ObsoleteSize uint64
			// ZombieSize is the number of bytes in zombie tables.
			ZombieSize uint64
		}
	}

	TableCache CacheMetrics

	// Count of the number of open sstable iterators.
	TableIters int64
	// Uptime is the total time since this DB was opened.
	Uptime time.Duration

	WAL struct {
		// Number of live WAL files.
		Files int64
		// Number of obsolete WAL files.
		ObsoleteFiles int64
		// Physical size of the obsolete WAL files.
		ObsoletePhysicalSize uint64
		// Size of the live data in the WAL files. Note that with WAL file
		// recycling this is less than the actual on-disk size of the WAL files.
		Size uint64
		// Physical size of the WAL files on-disk. With WAL file recycling,
		// this is greater than the live data in WAL files.
		//
		// TODO(sumeer): it seems this does not include ObsoletePhysicalSize.
		// Should the comment be updated?
		PhysicalSize uint64
		// Number of logical bytes written to the WAL.
		BytesIn uint64
		// Number of bytes written to the WAL.
		BytesWritten uint64
		// Failover contains failover stats. Empty if failover is not enabled.
		Failover wal.FailoverStats
	}

	LogWriter struct {
		FsyncLatency prometheus.Histogram
		record.LogWriterMetrics
	}

	CategoryStats []sstable.CategoryStatsAggregate

	SecondaryCacheMetrics SecondaryCacheMetrics

	private struct {
		optionsFileSize  uint64
		manifestFileSize uint64
	}
}
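
// Illustrative usage (not part of the original file): clients obtain a
// snapshot of these metrics via DB.Metrics. The directory name below is
// hypothetical:
//
//	db, err := pebble.Open("demo-db", &pebble.Options{})
//	if err != nil {
//		log.Fatal(err)
//	}
//	m := db.Metrics()
//	fmt.Println(m.String()) // the table documented on String below
//	fmt.Println(m.ReadAmp(), m.DiskSpaceUsage())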

var (
	// FsyncLatencyBuckets are prometheus histogram buckets suitable for a
	// histogram that records latencies for fsyncs.
	FsyncLatencyBuckets = append(
		prometheus.LinearBuckets(0.0, float64(time.Microsecond*100), 50),
		prometheus.ExponentialBucketsRange(float64(time.Millisecond*5), float64(10*time.Second), 50)...,
	)

	// SecondaryCacheIOBuckets is exported so that clients of package pebble
	// (e.g. CRDB) can export metrics recorded with these buckets.
	SecondaryCacheIOBuckets = sharedcache.IOBuckets
	// SecondaryCacheChannelWriteBuckets is exported so that clients of package
	// pebble (e.g. CRDB) can export metrics recorded with these buckets.
	SecondaryCacheChannelWriteBuckets = sharedcache.ChannelWriteBuckets
)
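
// A minimal sketch (not part of the original file) of building a Prometheus
// histogram from these buckets; the metric name is hypothetical. Note that
// the bucket boundaries are float64(time.Duration) values, i.e. nanoseconds,
// so observations should be recorded in nanoseconds as well:
//
//	fsyncHist := prometheus.NewHistogram(prometheus.HistogramOpts{
//		Name:    "pebble_wal_fsync_latency",
//		Buckets: pebble.FsyncLatencyBuckets,
//	})
//	prometheus.MustRegister(fsyncHist)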

// DiskSpaceUsage returns the total disk space used by the database in bytes,
// including live and obsolete files. This only includes local files, i.e.,
// remote files (as known to objstorage.Provider) are not included.
func (m *Metrics) DiskSpaceUsage() uint64 {
	var usageBytes uint64
	usageBytes += m.WAL.PhysicalSize
	usageBytes += m.WAL.ObsoletePhysicalSize
	usageBytes += m.Table.Local.LiveSize
	usageBytes += m.Table.Local.ObsoleteSize
	usageBytes += m.Table.Local.ZombieSize
	usageBytes += m.private.optionsFileSize
	usageBytes += m.private.manifestFileSize
	// TODO(sumeer): InProgressBytes does not distinguish between local and
	// remote files. This causes a small error. Fix.
	usageBytes += uint64(m.Compact.InProgressBytes)
	return usageBytes
}
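
// Illustrative usage (not part of the original file), e.g. a monitoring loop
// that alerts when the store exceeds a budget; the budget and the *pebble.DB
// named db are hypothetical:
//
//	const diskBudget = 100 << 30 // 100 GiB
//	if used := db.Metrics().DiskSpaceUsage(); used > diskBudget {
//		log.Printf("store is using %d bytes, over budget", used)
//	}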

// NumVirtual is the number of virtual sstables in the latest version
// summed over every level in the LSM.
func (m *Metrics) NumVirtual() uint64 {
	var n uint64
	for _, level := range m.Levels {
		n += level.NumVirtualFiles
	}
	return n
}

// VirtualSize is the sum of the sizes of the virtual sstables in the
// latest version. BackingTableSize - VirtualSize gives an estimate for
// the space amplification caused by not compacting virtual sstables.
func (m *Metrics) VirtualSize() uint64 {
	var size uint64
	for _, level := range m.Levels {
		size += level.VirtualSize
	}
	return size
}

// ReadAmp returns the current read amplification of the database.
// It's computed as the number of sublevels in L0 + the number of non-empty
// levels below L0. (Each non-empty level below L0 has exactly one sublevel,
// so summing Sublevels across all levels yields exactly this quantity.)
func (m *Metrics) ReadAmp() int {
	var ramp int32
	for _, l := range m.Levels {
		ramp += l.Sublevels
	}
	return int(ramp)
}
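
// Worked example (illustrative): with 4 sublevels in L0, empty L1-L4, and
// non-empty L5 and L6, Sublevels across the levels is [4 0 0 0 0 1 1], so
// ReadAmp() returns 6.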

// Total returns the sum of the per-level metrics and WAL metrics.
func (m *Metrics) Total() LevelMetrics {
	var total LevelMetrics
	for level := 0; level < numLevels; level++ {
		l := &m.Levels[level]
		total.Add(l)
		total.Sublevels += l.Sublevels
	}
	// Compute total bytes-in as the bytes written to the WAL + bytes ingested.
	total.BytesIn = m.WAL.BytesWritten + total.BytesIngested
	// Add the total bytes-in to the total bytes-flushed. This is to account for
	// the bytes written to the log and bytes written externally and then
	// ingested.
	total.BytesFlushed += total.BytesIn
	return total
}
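
// After these adjustments, total.WriteAmp() is the overall write
// amplification of the DB: (WAL bytes + ingested bytes + flushed bytes +
// compacted bytes) divided by (WAL bytes + ingested bytes). This is the
// value shown in the "w" column of the "total" row printed by String.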

// String pretty-prints the metrics as below:
//
//	      |                             |       |       |   ingested   |     moved    |    written   |       |    amp
//	level | tables  size val-bl vtables | score |   in  | tables  size | tables  size | tables  size |  read |   r   w
//	------+-----------------------------+-------+-------+--------------+--------------+--------------+-------+---------
//	    0 |   101   102B     0B       0 | 103.0 |  104B |   112   104B |   113   106B |   221   217B |  107B |   1  2.1
//	    1 |   201   202B     0B       0 | 203.0 |  204B |   212   204B |   213   206B |   421   417B |  207B |   2  2.0
//	    2 |   301   302B     0B       0 | 303.0 |  304B |   312   304B |   313   306B |   621   617B |  307B |   3  2.0
//	    3 |   401   402B     0B       0 | 403.0 |  404B |   412   404B |   413   406B |   821   817B |  407B |   4  2.0
//	    4 |   501   502B     0B       0 | 503.0 |  504B |   512   504B |   513   506B |  1.0K  1017B |  507B |   5  2.0
//	    5 |   601   602B     0B       0 | 603.0 |  604B |   612   604B |   613   606B |  1.2K  1.2KB |  607B |   6  2.0
//	    6 |   701   702B     0B       0 |     - |  704B |   712   704B |   713   706B |  1.4K  1.4KB |  707B |   7  2.0
//	total |  2.8K  2.7KB     0B       0 |     - | 2.8KB |  2.9K  2.8KB |  2.9K  2.8KB |  5.7K  8.4KB | 2.8KB |  28  3.0
//	-------------------------------------------------------------------------------------------------------------------
//	WAL: 22 files (24B) in: 25B written: 26B (4% overhead)
//	Flushes: 8
//	Compactions: 5 estimated debt: 6B in progress: 2 (7B)
//	 default: 27 delete: 28 elision: 29 move: 30 read: 31 rewrite: 32 multi-level: 33
//	MemTables: 12 (11B) zombie: 14 (13B)
//	Zombie tables: 16 (15B)
//	Backing tables: 0 (0B)
//	Block cache: 2 entries (1B) hit rate: 42.9%
//	Table cache: 18 entries (17B) hit rate: 48.7%
//	Secondary cache: 40 entries (40B) hit rate: 49.9%
//	Snapshots: 4 earliest seq num: 1024
//	Table iters: 21
//	Filter utility: 47.4%
//	Ingestions: 27 as flushable: 36 (34B in 35 tables)
func (m *Metrics) String() string {
	return redact.StringWithoutMarkers(m)
}

var _ redact.SafeFormatter = &Metrics{}

// SafeFormat implements redact.SafeFormatter.
func (m *Metrics) SafeFormat(w redact.SafePrinter, _ rune) {
	// NB: Pebble does not make any assumptions as to which Go primitive types
	// have been registered as safe with redact.RegisterSafeType and does not
	// register any types itself. Some of the calls to `redact.Safe`, etc are
	// superfluous in the context of CockroachDB, which registers all the Go
	// numeric types as safe.

	// TODO(jackson): There are a few places where we use redact.SafeValue
	// instead of redact.RedactableString. This is necessary because of a bug
	// whereby formatting a redact.RedactableString argument does not respect
	// width specifiers. When the issue is fixed, we can convert these to
	// RedactableStrings. https://github.com/cockroachdb/redact/issues/17

	multiExists := m.Compact.MultiLevelCount > 0
	appendIfMulti := func(line redact.SafeString) {
		if multiExists {
			w.SafeString(line)
		}
	}
	newline := func() {
		w.SafeString("\n")
	}

	w.SafeString("      |                             |       |       |   ingested   |     moved    |    written   |       |    amp")
	appendIfMulti(" |    multilevel")
	newline()
	w.SafeString("level | tables  size val-bl vtables | score |   in  | tables  size | tables  size | tables  size |  read |   r   w")
	appendIfMulti(" |   top    in  read")
	newline()
	w.SafeString("------+-----------------------------+-------+-------+--------------+--------------+--------------+-------+---------")
	appendIfMulti("-+------------------")
	newline()

	// formatRow prints out a row of the table.
	formatRow := func(m *LevelMetrics, score float64) {
		scoreStr := "-"
		if !math.IsNaN(score) {
			// Try to keep the string no longer than 5 characters.
			switch {
			case score < 99.995:
				scoreStr = fmt.Sprintf("%.2f", score)
			case score < 999.95:
				scoreStr = fmt.Sprintf("%.1f", score)
			default:
				scoreStr = fmt.Sprintf("%.0f", score)
			}
		}
		var wampStr string
		if wamp := m.WriteAmp(); wamp > 99.5 {
			wampStr = fmt.Sprintf("%.0f", wamp)
		} else {
			wampStr = fmt.Sprintf("%.1f", wamp)
		}

		w.Printf("| %5s %6s %6s %7s | %5s | %5s | %5s %6s | %5s %6s | %5s %6s | %5s | %3d %4s",
			humanize.Count.Int64(m.NumFiles),
			humanize.Bytes.Int64(m.Size),
			humanize.Bytes.Uint64(m.Additional.ValueBlocksSize),
			humanize.Count.Uint64(m.NumVirtualFiles),
			redact.Safe(scoreStr),
			humanize.Bytes.Uint64(m.BytesIn),
			humanize.Count.Uint64(m.TablesIngested),
			humanize.Bytes.Uint64(m.BytesIngested),
			humanize.Count.Uint64(m.TablesMoved),
			humanize.Bytes.Uint64(m.BytesMoved),
			humanize.Count.Uint64(m.TablesFlushed+m.TablesCompacted),
			humanize.Bytes.Uint64(m.BytesFlushed+m.BytesCompacted),
			humanize.Bytes.Uint64(m.BytesRead),
			redact.Safe(m.Sublevels),
			redact.Safe(wampStr))

		if multiExists {
			w.Printf(" | %5s %5s %5s",
				humanize.Bytes.Uint64(m.MultiLevel.BytesInTop),
				humanize.Bytes.Uint64(m.MultiLevel.BytesIn),
				humanize.Bytes.Uint64(m.MultiLevel.BytesRead))
		}
		newline()
	}

	var total LevelMetrics
	for level := 0; level < numLevels; level++ {
		l := &m.Levels[level]
		w.Printf("%5d ", redact.Safe(level))

		// Format the score.
		score := math.NaN()
		if level < numLevels-1 {
			score = l.Score
		}
		formatRow(l, score)
		total.Add(l)
		total.Sublevels += l.Sublevels
	}
	// Compute total bytes-in as the bytes written to the WAL + bytes ingested.
	total.BytesIn = m.WAL.BytesWritten + total.BytesIngested
	// Add the total bytes-in to the total bytes-flushed. This is to account for
	// the bytes written to the log and bytes written externally and then
	// ingested.
	total.BytesFlushed += total.BytesIn
	w.SafeString("total ")
	formatRow(&total, math.NaN())

	w.SafeString("-------------------------------------------------------------------------------------------------------------------")
	appendIfMulti("--------------------")
	newline()
	w.Printf("WAL: %d files (%s) in: %s written: %s (%.0f%% overhead)",
		redact.Safe(m.WAL.Files),
		humanize.Bytes.Uint64(m.WAL.Size),
		humanize.Bytes.Uint64(m.WAL.BytesIn),
		humanize.Bytes.Uint64(m.WAL.BytesWritten),
		redact.Safe(percent(int64(m.WAL.BytesWritten)-int64(m.WAL.BytesIn), int64(m.WAL.BytesIn))))
	if m.WAL.Failover == (wal.FailoverStats{}) {
		w.Printf("\n")
	} else {
		w.Printf(" failover: (switches: %d, primary: %s, secondary: %s)\n", m.WAL.Failover.DirSwitchCount,
			m.WAL.Failover.PrimaryWriteDuration.String(), m.WAL.Failover.SecondaryWriteDuration.String())
	}

	w.Printf("Flushes: %d\n", redact.Safe(m.Flush.Count))

	w.Printf("Compactions: %d estimated debt: %s in progress: %d (%s)\n",
		redact.Safe(m.Compact.Count),
		humanize.Bytes.Uint64(m.Compact.EstimatedDebt),
		redact.Safe(m.Compact.NumInProgress),
		humanize.Bytes.Int64(m.Compact.InProgressBytes))

	w.Printf(" default: %d delete: %d elision: %d move: %d read: %d rewrite: %d multi-level: %d\n",
		redact.Safe(m.Compact.DefaultCount),
		redact.Safe(m.Compact.DeleteOnlyCount),
		redact.Safe(m.Compact.ElisionOnlyCount),
		redact.Safe(m.Compact.MoveCount),
		redact.Safe(m.Compact.ReadCount),
		redact.Safe(m.Compact.RewriteCount),
		redact.Safe(m.Compact.MultiLevelCount))

	w.Printf("MemTables: %d (%s) zombie: %d (%s)\n",
		redact.Safe(m.MemTable.Count),
		humanize.Bytes.Uint64(m.MemTable.Size),
		redact.Safe(m.MemTable.ZombieCount),
		humanize.Bytes.Uint64(m.MemTable.ZombieSize))

	w.Printf("Zombie tables: %d (%s, local: %s)\n",
		redact.Safe(m.Table.ZombieCount),
		humanize.Bytes.Uint64(m.Table.ZombieSize),
		humanize.Bytes.Uint64(m.Table.Local.ZombieSize))

	w.Printf("Backing tables: %d (%s)\n",
		redact.Safe(m.Table.BackingTableCount),
		humanize.Bytes.Uint64(m.Table.BackingTableSize))
	w.Printf("Virtual tables: %d (%s)\n",
		redact.Safe(m.NumVirtual()),
		humanize.Bytes.Uint64(m.VirtualSize()))
	w.Printf("Local tables size: %s\n", humanize.Bytes.Uint64(m.Table.Local.LiveSize))

	formatCacheMetrics := func(m *CacheMetrics, name redact.SafeString) {
		w.Printf("%s: %s entries (%s) hit rate: %.1f%%\n",
			name,
			humanize.Count.Int64(m.Count),
			humanize.Bytes.Int64(m.Size),
			redact.Safe(hitRate(m.Hits, m.Misses)))
	}
	formatCacheMetrics(&m.BlockCache, "Block cache")
	formatCacheMetrics(&m.TableCache, "Table cache")

	formatSharedCacheMetrics := func(w redact.SafePrinter, m *SecondaryCacheMetrics, name redact.SafeString) {
		w.Printf("%s: %s entries (%s) hit rate: %.1f%%\n",
			name,
			humanize.Count.Int64(m.Count),
			humanize.Bytes.Int64(m.Size),
			redact.Safe(hitRate(m.ReadsWithFullHit, m.ReadsWithPartialHit+m.ReadsWithNoHit)))
	}
	formatSharedCacheMetrics(w, &m.SecondaryCacheMetrics, "Secondary cache")

	w.Printf("Snapshots: %d earliest seq num: %d\n",
		redact.Safe(m.Snapshots.Count),
		redact.Safe(m.Snapshots.EarliestSeqNum))

	w.Printf("Table iters: %d\n", redact.Safe(m.TableIters))
	w.Printf("Filter utility: %.1f%%\n", redact.Safe(hitRate(m.Filter.Hits, m.Filter.Misses)))
	w.Printf("Ingestions: %d as flushable: %d (%s in %d tables)\n",
		redact.Safe(m.Ingest.Count),
		redact.Safe(m.Flush.AsIngestCount),
		humanize.Bytes.Uint64(m.Flush.AsIngestBytes),
		redact.Safe(m.Flush.AsIngestTableCount))
}
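
// Illustrative usage (not part of the original file): because Metrics
// implements redact.SafeFormatter, it can be rendered as a redactable string
// in which only values considered unsafe are marked for redaction:
//
//	s := redact.Sprint(m) // RedactableString with markers
//	_ = s.Redact()        // unsafe values replaced by the redaction marker
//	_ = s.StripMarkers()  // equivalent to m.String()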

func hitRate(hits, misses int64) float64 {
	return percent(hits, hits+misses)
}

func percent(numerator, denominator int64) float64 {
	if denominator == 0 {
		return 0
	}
	return 100 * float64(numerator) / float64(denominator)
}
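
// Worked example (illustrative): hitRate(3, 4) = percent(3, 7) = 100*3/7 ≈
// 42.9, which is how a line like "hit rate: 42.9%" in the sample output of
// String is produced from 3 hits and 4 misses.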

// StringForTests is identical to m.String() on 64-bit platforms. It is used to
// provide a platform-independent result for tests.
func (m *Metrics) StringForTests() string {
	mCopy := *m
	if math.MaxInt == math.MaxInt32 {
		// This is the difference in Sizeof(sstable.Reader{}) between 64 and
		// 32 bit platforms.
		const tableCacheSizeAdjustment = 212
		mCopy.TableCache.Size += mCopy.TableCache.Count * tableCacheSizeAdjustment
	}
	return redact.StringWithoutMarkers(&mCopy)
}