// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"fmt"
	"math"
	"time"

	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/cache"
	"github.com/cockroachdb/pebble/internal/humanize"
	"github.com/cockroachdb/pebble/objstorage/objstorageprovider/sharedcache"
	"github.com/cockroachdb/pebble/record"
	"github.com/cockroachdb/pebble/sstable"
	"github.com/cockroachdb/redact"
	"github.com/prometheus/client_golang/prometheus"
)

// CacheMetrics holds metrics for the block and table cache.
type CacheMetrics = cache.Metrics

// FilterMetrics holds metrics for the filter policy.
type FilterMetrics = sstable.FilterMetrics

// ThroughputMetric is a cumulative throughput metric. See the detailed
// comment in base.
type ThroughputMetric = base.ThroughputMetric

// SecondaryCacheMetrics holds metrics for the persistent secondary cache
// that caches commonly accessed blocks from blob storage on a local
// file system.
type SecondaryCacheMetrics = sharedcache.Metrics

// LevelMetrics holds per-level metrics such as the number of files, the total
// size of the files, and compaction-related metrics.
type LevelMetrics struct {
	// The number of sublevels within the level. The sublevel count corresponds
	// to the read amplification for the level. An empty level will have a
	// sublevel count of 0, implying no read amplification. Only L0 will have
	// a sublevel count other than 0 or 1.
	Sublevels int32
	// The total number of files in the level.
	NumFiles int64
	// The total size in bytes of the files in the level.
	Size int64
	// The level's compaction score.
	Score float64
	// The number of incoming bytes from other levels read during
	// compactions. This excludes bytes moved and bytes ingested. For L0 this is
	// the bytes written to the WAL.
	BytesIn uint64
	// The number of bytes ingested. The sibling metric for tables is
	// TablesIngested.
	BytesIngested uint64
	// The number of bytes moved into the level by a "move" compaction. The
	// sibling metric for tables is TablesMoved.
	BytesMoved uint64
	// The number of bytes read for compactions at the level. This includes bytes
	// read from other levels (BytesIn), as well as bytes read for the level.
	BytesRead uint64
	// The number of bytes written during compactions. The sibling
	// metric for tables is TablesCompacted. This metric may be summed
	// with BytesFlushed to compute the total bytes written for the level.
	BytesCompacted uint64
	// The number of bytes written during flushes. The sibling
	// metric for tables is TablesFlushed. This metric is always
	// zero for all levels other than L0.
	BytesFlushed uint64
	// The number of sstables compacted to this level.
	TablesCompacted uint64
	// The number of sstables flushed to this level.
	TablesFlushed uint64
	// The number of sstables ingested into the level.
	TablesIngested uint64
	// The number of sstables moved to this level by a "move" compaction.
	TablesMoved uint64

	MultiLevel struct {
		// BytesInTop are the total bytes in a multilevel compaction coming
		// from the top level.
		BytesInTop uint64

		// BytesIn, exclusively for multilevel compactions.
		BytesIn uint64

		// BytesRead, exclusively for multilevel compactions.
		BytesRead uint64
	}

	// Additional contains misc additional metrics that are not always printed.
	Additional struct {
		// The sum of Properties.ValueBlocksSize for all the sstables in this
		// level. Printed by LevelMetrics.format iff there is at least one level
		// with a non-zero value.
		ValueBlocksSize uint64
		// Cumulative metrics about bytes written to data blocks and value blocks,
		// via compactions (except move compactions) or flushes. Not printed by
		// LevelMetrics.format, but are available to sophisticated clients.
		BytesWrittenDataBlocks  uint64
		BytesWrittenValueBlocks uint64
	}
}

// Add updates the counter metrics for the level.
func (m *LevelMetrics) Add(u *LevelMetrics) {
	m.NumFiles += u.NumFiles
	m.Size += u.Size
	m.BytesIn += u.BytesIn
	m.BytesIngested += u.BytesIngested
	m.BytesMoved += u.BytesMoved
	m.BytesRead += u.BytesRead
	m.BytesCompacted += u.BytesCompacted
	m.BytesFlushed += u.BytesFlushed
	m.TablesCompacted += u.TablesCompacted
	m.TablesFlushed += u.TablesFlushed
	m.TablesIngested += u.TablesIngested
	m.TablesMoved += u.TablesMoved
	m.MultiLevel.BytesInTop += u.MultiLevel.BytesInTop
	m.MultiLevel.BytesRead += u.MultiLevel.BytesRead
	m.MultiLevel.BytesIn += u.MultiLevel.BytesIn
	m.Additional.BytesWrittenDataBlocks += u.Additional.BytesWrittenDataBlocks
	m.Additional.BytesWrittenValueBlocks += u.Additional.BytesWrittenValueBlocks
	m.Additional.ValueBlocksSize += u.Additional.ValueBlocksSize
}

// WriteAmp computes the write amplification for compactions at this
// level. Computed as (BytesFlushed + BytesCompacted) / BytesIn.
func (m *LevelMetrics) WriteAmp() float64 {
	if m.BytesIn == 0 {
		return 0
	}
	return float64(m.BytesFlushed+m.BytesCompacted) / float64(m.BytesIn)
}
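
// A worked example of the formula above: a level that took in 10 MB from the
// level above and wrote 25 MB of compacted output has a write amplification
// of 2.5:
//
//	m := LevelMetrics{BytesIn: 10 << 20, BytesCompacted: 25 << 20}
//	fmt.Println(m.WriteAmp()) // 2.5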

// Metrics holds metrics for various subsystems of the DB such as the Cache,
// Compactions, WAL, and per-Level metrics.
//
// TODO(peter): The testing of these metrics is relatively weak. There should
// be testing that performs various operations on a DB and verifies that the
// metrics reflect those operations.
type Metrics struct {
	BlockCache CacheMetrics

	Compact struct {
		// The total number of compactions, and per-compaction type counts.
		Count             int64
		DefaultCount      int64
		DeleteOnlyCount   int64
		ElisionOnlyCount  int64
		MoveCount         int64
		ReadCount         int64
		RewriteCount      int64
		MultiLevelCount   int64
		CounterLevelCount int64
		// An estimate of the number of bytes that need to be compacted for the LSM
		// to reach a stable state.
		EstimatedDebt uint64
		// Number of bytes present in sstables being written by in-progress
		// compactions. This value will be zero if there are no in-progress
		// compactions.
		InProgressBytes int64
		// Number of compactions that are in-progress.
		NumInProgress int64
		// MarkedFiles is a count of files that are marked for
		// compaction. Such files are compacted in a rewrite compaction
		// when no other compactions are picked.
		MarkedFiles int
		// Duration records the cumulative duration of all compactions since the
		// database was opened.
		Duration time.Duration
	}

	Ingest struct {
		// The total number of ingestions.
		Count uint64
	}

	Flush struct {
		// The total number of flushes.
		Count           int64
		WriteThroughput ThroughputMetric
		// Number of flushes that are in-progress. In the current implementation
		// this will always be zero or one.
		NumInProgress int64
		// AsIngestCount is a monotonically increasing counter of flush operations
		// handling ingested tables.
		AsIngestCount uint64
		// AsIngestTableCount is a monotonically increasing counter of tables
		// ingested as flushables.
		AsIngestTableCount uint64
		// AsIngestBytes is a monotonically increasing counter of the bytes flushed
		// for flushables that originated as ingestion operations.
		AsIngestBytes uint64
	}

	Filter FilterMetrics

	Levels [numLevels]LevelMetrics

	MemTable struct {
		// The number of bytes allocated by memtables and large (flushable)
		// batches.
		Size uint64
		// The count of memtables.
		Count int64
		// The number of bytes present in zombie memtables which are no longer
		// referenced by the current DB state. An unbounded number of memtables
		// may be zombie if they're still in use by an iterator. One additional
		// memtable may be zombie if it's no longer in use and waiting to be
		// recycled.
		ZombieSize uint64
		// The count of zombie memtables.
		ZombieCount int64
	}

	Keys struct {
		// The approximate count of internal range key set keys in the database.
		RangeKeySetsCount uint64
		// The approximate count of internal tombstones (DEL, SINGLEDEL and
		// RANGEDEL key kinds) within the database.
		TombstoneCount uint64
		// A cumulative total number of missized DELSIZED keys encountered by
		// compactions since the database was opened.
		MissizedTombstonesCount uint64
	}

	Snapshots struct {
		// The number of currently open snapshots.
		Count int
		// The sequence number of the earliest, currently open snapshot.
		EarliestSeqNum uint64
		// A running tally of keys written to sstables during flushes or
		// compactions that would've been elided if it weren't for open
		// snapshots.
		PinnedKeys uint64
		// A running cumulative sum of the size of keys and values written to
		// sstables during flushes or compactions that would've been elided if
		// it weren't for open snapshots.
		PinnedSize uint64
	}

	Table struct {
		// The number of bytes present in obsolete tables which are no longer
		// referenced by the current DB state or any open iterators.
		ObsoleteSize uint64
		// The count of obsolete tables.
		ObsoleteCount int64
		// The number of bytes present in zombie tables which are no longer
		// referenced by the current DB state but are still in use by an iterator.
		ZombieSize uint64
		// The count of zombie tables.
		ZombieCount int64
	}

	TableCache CacheMetrics

	// The number of open sstable iterators.
	TableIters int64
	// Uptime is the total time since this DB was opened.
	Uptime time.Duration

	WAL struct {
		// Number of live WAL files.
		Files int64
		// Number of obsolete WAL files.
		ObsoleteFiles int64
		// Physical size of the obsolete WAL files.
		ObsoletePhysicalSize uint64
		// Size of the live data in the WAL files. Note that with WAL file
		// recycling this is less than the actual on-disk size of the WAL files.
		Size uint64
		// Physical size of the WAL files on-disk. With WAL file recycling,
		// this is greater than the live data in WAL files.
		PhysicalSize uint64
		// Number of logical bytes written to the WAL.
		BytesIn uint64
		// Number of bytes written to the WAL.
		BytesWritten uint64
	}

	LogWriter struct {
		FsyncLatency prometheus.Histogram
		record.LogWriterMetrics
	}

	SecondaryCacheMetrics SecondaryCacheMetrics

	private struct {
		optionsFileSize  uint64
		manifestFileSize uint64
	}
}
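
// A hedged sketch of how a client might poll these metrics; the db variable
// and the polling interval are illustrative, not part of this package:
//
//	go func() {
//		for range time.Tick(10 * time.Second) {
//			m := db.Metrics()
//			log.Printf("read amp: %d compactions: %d", m.ReadAmp(), m.Compact.Count)
//		}
//	}()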

var (
	// FsyncLatencyBuckets are prometheus histogram buckets suitable for a histogram
	// that records latencies for fsyncs.
	FsyncLatencyBuckets = append(
		prometheus.LinearBuckets(0.0, float64(time.Microsecond*100), 50),
		prometheus.ExponentialBucketsRange(float64(time.Millisecond*5), float64(10*time.Second), 50)...,
	)

	// SecondaryCacheIOBuckets is exported from package pebble so that CRDB
	// can export metrics using these buckets.
	SecondaryCacheIOBuckets = sharedcache.IOBuckets
	// SecondaryCacheChannelWriteBuckets is exported from package pebble so
	// that CRDB can export metrics using these buckets.
	SecondaryCacheChannelWriteBuckets = sharedcache.ChannelWriteBuckets
)
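
// A minimal sketch of constructing a Prometheus histogram with these buckets,
// of the kind surfaced in Metrics.LogWriter.FsyncLatency. The metric name is
// illustrative; note the bucket boundaries are in nanoseconds, since they are
// derived from time.Duration values:
//
//	hist := prometheus.NewHistogram(prometheus.HistogramOpts{
//		Name:    "pebble_wal_fsync_latency",
//		Buckets: FsyncLatencyBuckets,
//	})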

// DiskSpaceUsage returns the total disk space used by the database in bytes,
// including live and obsolete files.
func (m *Metrics) DiskSpaceUsage() uint64 {
	var usageBytes uint64
	usageBytes += m.WAL.PhysicalSize
	usageBytes += m.WAL.ObsoletePhysicalSize
	for _, lm := range m.Levels {
		usageBytes += uint64(lm.Size)
	}
	usageBytes += m.Table.ObsoleteSize
	usageBytes += m.Table.ZombieSize
	usageBytes += m.private.optionsFileSize
	usageBytes += m.private.manifestFileSize
	usageBytes += uint64(m.Compact.InProgressBytes)
	return usageBytes
}
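
// A hedged usage sketch (the db variable is illustrative):
//
//	m := db.Metrics()
//	fmt.Printf("disk usage: %s\n", humanize.Bytes.Uint64(m.DiskSpaceUsage()))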

// ReadAmp returns the current read amplification of the database.
// It's computed as the number of sublevels in L0 + the number of non-empty
// levels below L0.
func (m *Metrics) ReadAmp() int {
	var ramp int32
	for _, l := range m.Levels {
		ramp += l.Sublevels
	}
	return int(ramp)
}
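
// For example, an LSM with 4 sublevels in L0 and three non-empty levels below
// it (each contributing a sublevel count of 1) has a read amplification of
// 4 + 3 = 7.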

// Total returns the sum of the per-level metrics and WAL metrics.
func (m *Metrics) Total() LevelMetrics {
	var total LevelMetrics
	for level := 0; level < numLevels; level++ {
		l := &m.Levels[level]
		total.Add(l)
		total.Sublevels += l.Sublevels
	}
	// Compute total bytes-in as the bytes written to the WAL + bytes ingested.
	total.BytesIn = m.WAL.BytesWritten + total.BytesIngested
	// Add the total bytes-in to the total bytes-flushed. This is to account for
	// the bytes written to the log and bytes written externally and then
	// ingested.
	total.BytesFlushed += total.BytesIn
	return total
}

// String pretty-prints the metrics as below:
//
//	      |                     |       |       |   ingested   |     moved    |    written   |       |    amp
//	level | tables  size val-bl | score |   in  | tables  size | tables  size | tables  size |  read |   r   w
//	------+---------------------+-------+-------+--------------+--------------+--------------+-------+---------
//	    0 |   101   102B     0B | 103.0 |  104B |   112   104B |   113   106B |   221   217B |  107B |   1  2.1
//	    1 |   201   202B     0B | 203.0 |  204B |   212   204B |   213   206B |   421   417B |  207B |   2  2.0
//	    2 |   301   302B     0B | 303.0 |  304B |   312   304B |   313   306B |   621   617B |  307B |   3  2.0
//	    3 |   401   402B     0B | 403.0 |  404B |   412   404B |   413   406B |   821   817B |  407B |   4  2.0
//	    4 |   501   502B     0B | 503.0 |  504B |   512   504B |   513   506B |  1.0K  1017B |  507B |   5  2.0
//	    5 |   601   602B     0B | 603.0 |  604B |   612   604B |   613   606B |  1.2K  1.2KB |  607B |   6  2.0
//	    6 |   701   702B     0B |     - |  704B |   712   704B |   713   706B |  1.4K  1.4KB |  707B |   7  2.0
//	total |  2.8K  2.7KB     0B |     - | 2.8KB |  2.9K  2.8KB |  2.9K  2.8KB |  5.7K  8.4KB | 2.8KB |  28  3.0
//	-----------------------------------------------------------------------------------------------------------
//	WAL: 22 files (24B) in: 25B written: 26B (4% overhead)
//	Flushes: 8
//	Compactions: 5 estimated debt: 6B in progress: 2 (7B)
//	 default: 27 delete: 28 elision: 29 move: 30 read: 31 rewrite: 32 multi-level: 33
//	MemTables: 12 (11B) zombie: 14 (13B)
//	Zombie tables: 16 (15B)
//	Block cache: 2 entries (1B) hit rate: 42.9%
//	Table cache: 18 entries (17B) hit rate: 48.7%
//	Secondary cache: 40 entries (40B) hit rate: 49.9%
//	Snapshots: 4 earliest seq num: 1024
//	Table iters: 21
//	Filter utility: 47.4%
//	Ingestions: 27 as flushable: 36 (34B in 35 tables)
func (m *Metrics) String() string {
	return redact.StringWithoutMarkers(m)
}

var _ redact.SafeFormatter = &Metrics{}

// SafeFormat implements redact.SafeFormatter.
func (m *Metrics) SafeFormat(w redact.SafePrinter, _ rune) {
	// NB: Pebble does not make any assumptions as to which Go primitive types
	// have been registered as safe with redact.RegisterSafeType and does not
	// register any types itself. Some of the calls to `redact.Safe`, etc are
	// superfluous in the context of CockroachDB, which registers all the Go
	// numeric types as safe.

	// TODO(jackson): There are a few places where we use redact.SafeValue
	// instead of redact.RedactableString. This is necessary because of a bug
	// whereby formatting a redact.RedactableString argument does not respect
	// width specifiers. When the issue is fixed, we can convert these to
	// RedactableStrings. https://github.com/cockroachdb/redact/issues/17

	multiExists := m.Compact.MultiLevelCount > 0
	appendIfMulti := func(line redact.SafeString) {
		if multiExists {
			w.SafeString(line)
		}
	}
	newline := func() {
		w.SafeString("\n")
	}

	w.SafeString("      |                     |       |       |   ingested   |     moved    |    written   |       |    amp")
	appendIfMulti(" |    multilevel")
	newline()
	w.SafeString("level | tables  size val-bl | score |   in  | tables  size | tables  size | tables  size |  read |   r   w")
	appendIfMulti(" |   top    in  read")
	newline()
	w.SafeString("------+---------------------+-------+-------+--------------+--------------+--------------+-------+---------")
	appendIfMulti("-+------------------")
	newline()

	// formatRow prints out a row of the table.
	formatRow := func(m *LevelMetrics, score float64) {
		scoreStr := "-"
		if !math.IsNaN(score) {
			// Try to keep the string no longer than 5 characters.
			switch {
			case score < 99.995:
				scoreStr = fmt.Sprintf("%.2f", score)
			case score < 999.95:
				scoreStr = fmt.Sprintf("%.1f", score)
			default:
				scoreStr = fmt.Sprintf("%.0f", score)
			}
		}
		var wampStr string
		if wamp := m.WriteAmp(); wamp > 99.5 {
			wampStr = fmt.Sprintf("%.0f", wamp)
		} else {
			wampStr = fmt.Sprintf("%.1f", wamp)
		}

		w.Printf("| %5s %6s %6s | %5s | %5s | %5s %6s | %5s %6s | %5s %6s | %5s | %3d %4s",
			humanize.Count.Int64(m.NumFiles),
			humanize.Bytes.Int64(m.Size),
			humanize.Bytes.Uint64(m.Additional.ValueBlocksSize),
			redact.Safe(scoreStr),
			humanize.Bytes.Uint64(m.BytesIn),
			humanize.Count.Uint64(m.TablesIngested),
			humanize.Bytes.Uint64(m.BytesIngested),
			humanize.Count.Uint64(m.TablesMoved),
			humanize.Bytes.Uint64(m.BytesMoved),
			humanize.Count.Uint64(m.TablesFlushed+m.TablesCompacted),
			humanize.Bytes.Uint64(m.BytesFlushed+m.BytesCompacted),
			humanize.Bytes.Uint64(m.BytesRead),
			redact.Safe(m.Sublevels),
			redact.Safe(wampStr))

		if multiExists {
			w.Printf(" | %5s %5s %5s",
				humanize.Bytes.Uint64(m.MultiLevel.BytesInTop),
				humanize.Bytes.Uint64(m.MultiLevel.BytesIn),
				humanize.Bytes.Uint64(m.MultiLevel.BytesRead))
		}
		newline()
	}

	var total LevelMetrics
	for level := 0; level < numLevels; level++ {
		l := &m.Levels[level]
		w.Printf("%5d ", redact.Safe(level))

		// Format the score.
		score := math.NaN()
		if level < numLevels-1 {
			score = l.Score
		}
		formatRow(l, score)
		total.Add(l)
		total.Sublevels += l.Sublevels
	}
	// Compute total bytes-in as the bytes written to the WAL + bytes ingested.
	total.BytesIn = m.WAL.BytesWritten + total.BytesIngested
	// Add the total bytes-in to the total bytes-flushed. This is to account for
	// the bytes written to the log and bytes written externally and then
	// ingested.
	total.BytesFlushed += total.BytesIn
	w.SafeString("total ")
	formatRow(&total, math.NaN())

	w.SafeString("-----------------------------------------------------------------------------------------------------------")
	appendIfMulti("--------------------")
	newline()
	w.Printf("WAL: %d files (%s) in: %s written: %s (%.0f%% overhead)\n",
		redact.Safe(m.WAL.Files),
		humanize.Bytes.Uint64(m.WAL.Size),
		humanize.Bytes.Uint64(m.WAL.BytesIn),
		humanize.Bytes.Uint64(m.WAL.BytesWritten),
		redact.Safe(percent(int64(m.WAL.BytesWritten)-int64(m.WAL.BytesIn), int64(m.WAL.BytesIn))))

	w.Printf("Flushes: %d\n", redact.Safe(m.Flush.Count))

	w.Printf("Compactions: %d estimated debt: %s in progress: %d (%s)\n",
		redact.Safe(m.Compact.Count),
		humanize.Bytes.Uint64(m.Compact.EstimatedDebt),
		redact.Safe(m.Compact.NumInProgress),
		humanize.Bytes.Int64(m.Compact.InProgressBytes))

	w.Printf(" default: %d delete: %d elision: %d move: %d read: %d rewrite: %d multi-level: %d\n",
		redact.Safe(m.Compact.DefaultCount),
		redact.Safe(m.Compact.DeleteOnlyCount),
		redact.Safe(m.Compact.ElisionOnlyCount),
		redact.Safe(m.Compact.MoveCount),
		redact.Safe(m.Compact.ReadCount),
		redact.Safe(m.Compact.RewriteCount),
		redact.Safe(m.Compact.MultiLevelCount))

	w.Printf("MemTables: %d (%s) zombie: %d (%s)\n",
		redact.Safe(m.MemTable.Count),
		humanize.Bytes.Uint64(m.MemTable.Size),
		redact.Safe(m.MemTable.ZombieCount),
		humanize.Bytes.Uint64(m.MemTable.ZombieSize))

	w.Printf("Zombie tables: %d (%s)\n",
		redact.Safe(m.Table.ZombieCount),
		humanize.Bytes.Uint64(m.Table.ZombieSize))

	formatCacheMetrics := func(m *CacheMetrics, name redact.SafeString) {
		w.Printf("%s: %s entries (%s) hit rate: %.1f%%\n",
			name,
			humanize.Count.Int64(m.Count),
			humanize.Bytes.Int64(m.Size),
			redact.Safe(hitRate(m.Hits, m.Misses)))
	}
	formatCacheMetrics(&m.BlockCache, "Block cache")
	formatCacheMetrics(&m.TableCache, "Table cache")

	formatSharedCacheMetrics := func(w redact.SafePrinter, m *SecondaryCacheMetrics, name redact.SafeString) {
		w.Printf("%s: %s entries (%s) hit rate: %.1f%%\n",
			name,
			humanize.Count.Int64(m.Count),
			humanize.Bytes.Int64(m.Size),
			redact.Safe(hitRate(m.ReadsWithFullHit, m.ReadsWithPartialHit+m.ReadsWithNoHit)))
	}
	formatSharedCacheMetrics(w, &m.SecondaryCacheMetrics, "Secondary cache")

	w.Printf("Snapshots: %d earliest seq num: %d\n",
		redact.Safe(m.Snapshots.Count),
		redact.Safe(m.Snapshots.EarliestSeqNum))

	w.Printf("Table iters: %d\n", redact.Safe(m.TableIters))
	w.Printf("Filter utility: %.1f%%\n", redact.Safe(hitRate(m.Filter.Hits, m.Filter.Misses)))
	w.Printf("Ingestions: %d as flushable: %d (%s in %d tables)\n",
		redact.Safe(m.Ingest.Count),
		redact.Safe(m.Flush.AsIngestCount),
		humanize.Bytes.Uint64(m.Flush.AsIngestBytes),
		redact.Safe(m.Flush.AsIngestTableCount))
}

func hitRate(hits, misses int64) float64 {
	return percent(hits, hits+misses)
}

func percent(numerator, denominator int64) float64 {
	if denominator == 0 {
		return 0
	}
	return 100 * float64(numerator) / float64(denominator)
}
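
// For example, the WAL overhead in the String doc comment above is computed
// as percent(BytesWritten-BytesIn, BytesIn): with 26B written and 25B in,
// percent(1, 25) = 4%.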

// StringForTests is identical to m.String() on 64-bit platforms. It is used to
// provide a platform-independent result for tests.
func (m *Metrics) StringForTests() string {
	mCopy := *m
	if math.MaxInt == math.MaxInt32 {
		// This is the difference in Sizeof(sstable.Reader{}) between 64 and 32
		// bit platforms.
		const tableCacheSizeAdjustment = 212
		mCopy.TableCache.Size += mCopy.TableCache.Count * tableCacheSizeAdjustment
	}
	return redact.StringWithoutMarkers(&mCopy)
}