Line data Source code
1 : // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package tool
6 :
7 : import (
8 : "cmp"
9 : "fmt"
10 : "io"
11 : "slices"
12 : "time"
13 :
14 : "github.com/HdrHistogram/hdrhistogram-go"
15 : "github.com/cockroachdb/pebble"
16 : "github.com/cockroachdb/pebble/internal/base"
17 : "github.com/cockroachdb/pebble/internal/humanize"
18 : "github.com/cockroachdb/pebble/internal/manifest"
19 : "github.com/cockroachdb/pebble/record"
20 : "github.com/cockroachdb/pebble/sstable"
21 : "github.com/spf13/cobra"
22 : )
23 :
24 : // manifestT implements manifest-level tools, including both configuration
25 : // state and the commands themselves.
26 : type manifestT struct {
27 : Root *cobra.Command
28 : Dump *cobra.Command
29 : Summarize *cobra.Command
30 : Check *cobra.Command
31 :
32 : opts *pebble.Options
33 : comparers sstable.Comparers
34 : fmtKey keyFormatter
35 : verbose bool
36 :
37 : filterStart key
38 : filterEnd key
39 :
40 : summarizeDur time.Duration
41 : }
42 :
43 1 : func newManifest(opts *pebble.Options, comparers sstable.Comparers) *manifestT {
44 1 : m := &manifestT{
45 1 : opts: opts,
46 1 : comparers: comparers,
47 1 : summarizeDur: time.Hour,
48 1 : }
49 1 : m.fmtKey.mustSet("quoted")
50 1 :
51 1 : m.Root = &cobra.Command{
52 1 : Use: "manifest",
53 1 : Short: "manifest introspection tools",
54 1 : }
55 1 :
56 1 : // Add dump command
57 1 : m.Dump = &cobra.Command{
58 1 : Use: "dump <manifest-files>",
59 1 : Short: "print manifest contents",
60 1 : Long: `
61 1 : Print the contents of the MANIFEST files.
62 1 : `,
63 1 : Args: cobra.MinimumNArgs(1),
64 1 : Run: m.runDump,
65 1 : }
66 1 : m.Dump.Flags().Var(&m.fmtKey, "key", "key formatter")
67 1 : m.Dump.Flags().Var(&m.filterStart, "filter-start", "start key filters out all version edits that only reference sstables containing keys strictly before the given key")
68 1 : m.Dump.Flags().Var(&m.filterEnd, "filter-end", "end key filters out all version edits that only reference sstables containing keys at or strictly after the given key")
69 1 : m.Root.AddCommand(m.Dump)
70 1 : m.Root.PersistentFlags().BoolVarP(&m.verbose, "verbose", "v", false, "verbose output")
71 1 :
72 1 : // Add summarize command
73 1 : m.Summarize = &cobra.Command{
74 1 : Use: "summarize <manifest-files>",
75 1 : Short: "summarize manifest contents",
76 1 : Long: `
77 1 : Summarize the edits to the MANIFEST files over time.
78 1 : `,
79 1 : Args: cobra.MinimumNArgs(1),
80 1 : Run: m.runSummarize,
81 1 : }
82 1 : m.Root.AddCommand(m.Summarize)
83 1 : m.Summarize.Flags().DurationVar(
84 1 : &m.summarizeDur, "dur", time.Hour, "bucket duration as a Go duration string (e.g., '1h', '15m')")
85 1 :
86 1 : // Add check command
87 1 : m.Check = &cobra.Command{
88 1 : Use: "check <manifest-files>",
89 1 : Short: "check manifest contents",
90 1 : Long: `
91 1 : Check the contents of the MANIFEST files.
92 1 : `,
93 1 : Args: cobra.MinimumNArgs(1),
94 1 : Run: m.runCheck,
95 1 : }
96 1 : m.Root.AddCommand(m.Check)
97 1 : m.Check.Flags().Var(
98 1 : &m.fmtKey, "key", "key formatter")
99 1 :
100 1 : return m
101 1 : }
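// A minimal wiring sketch (illustrative only; "cli" and "opts" below are
// hypothetical, not part of this file): all three subcommands hang off m.Root,
// so a caller embedding this tool just attaches that root command to its own
// cobra CLI.
//
//	m := newManifest(opts, nil /* comparers */) // opts is a *pebble.Options with FS set
//	cli := &cobra.Command{Use: "pebble"}
//	cli.AddCommand(m.Root)
//	// "pebble manifest dump MANIFEST-000001" then dispatches to m.runDump.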
102 :
103 1 : func (m *manifestT) printLevels(cmp base.Compare, stdout io.Writer, v *manifest.Version) {
104 1 : for level := range v.Levels {
105 1 : if level == 0 && len(v.L0SublevelFiles) > 0 && !v.Levels[level].Empty() {
106 1 : for sublevel := len(v.L0SublevelFiles) - 1; sublevel >= 0; sublevel-- {
107 1 : fmt.Fprintf(stdout, "--- L0.%d ---\n", sublevel)
108 1 : v.L0SublevelFiles[sublevel].Each(func(f *manifest.FileMetadata) {
109 1 : if !anyOverlapFile(cmp, f, m.filterStart, m.filterEnd) {
110 1 : return
111 1 : }
112 1 : fmt.Fprintf(stdout, " %s:%d", f.FileNum, f.Size)
113 1 : formatSeqNumRange(stdout, f.SmallestSeqNum, f.LargestSeqNum)
114 1 : formatKeyRange(stdout, m.fmtKey, &f.Smallest, &f.Largest)
115 1 : if f.Virtual {
116 0 : fmt.Fprintf(stdout, "(virtual:backingNum=%s)", f.FileBacking.DiskFileNum)
117 0 : }
118 1 : fmt.Fprintf(stdout, "\n")
119 : })
120 : }
121 1 : continue
122 : }
123 1 : fmt.Fprintf(stdout, "--- L%d ---\n", level)
124 1 : iter := v.Levels[level].Iter()
125 1 : for f := iter.First(); f != nil; f = iter.Next() {
126 1 : if !anyOverlapFile(cmp, f, m.filterStart, m.filterEnd) {
127 0 : continue
128 : }
129 1 : fmt.Fprintf(stdout, " %s:%d", f.FileNum, f.Size)
130 1 : formatSeqNumRange(stdout, f.SmallestSeqNum, f.LargestSeqNum)
131 1 : formatKeyRange(stdout, m.fmtKey, &f.Smallest, &f.Largest)
132 1 : if f.Virtual {
133 0 : fmt.Fprintf(stdout, "(virtual:backingNum=%s)", f.FileBacking.DiskFileNum)
134 0 : }
135 1 : fmt.Fprintf(stdout, "\n")
136 : }
137 : }
138 : }
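// For reference, printLevels emits one "--- L<n> ---" header per level (with
// L0 broken out per sublevel when sublevel metadata is present), followed by
// one line per table that passes the key filter: the file number and size, the
// seqnum range, the key range, and a "(virtual:backingNum=...)" suffix for
// virtual tables.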
139 :
140 1 : func (m *manifestT) runDump(cmd *cobra.Command, args []string) {
141 1 : stdout, stderr := cmd.OutOrStdout(), cmd.OutOrStderr()
142 1 : for _, arg := range args {
143 1 : func() {
144 1 : f, err := m.opts.FS.Open(arg)
145 1 : if err != nil {
146 0 : fmt.Fprintf(stderr, "%s\n", err)
147 0 : return
148 0 : }
149 1 : defer f.Close()
150 1 :
151 1 : fmt.Fprintf(stdout, "%s\n", arg)
152 1 :
153 1 : var bve manifest.BulkVersionEdit
154 1 : bve.AddedByFileNum = make(map[base.FileNum]*manifest.FileMetadata)
155 1 : var comparer *base.Comparer
156 1 : var editIdx int
157 1 : rr := record.NewReader(f, 0 /* logNum */)
158 1 : for {
159 1 : offset := rr.Offset()
160 1 : r, err := rr.Next()
161 1 : if err != nil {
162 1 : fmt.Fprintf(stdout, "%s\n", err)
163 1 : break
164 : }
165 :
166 1 : var ve manifest.VersionEdit
167 1 : err = ve.Decode(r)
168 1 : if err != nil {
169 0 : fmt.Fprintf(stdout, "%s\n", err)
170 0 : break
171 : }
172 1 : if err := bve.Accumulate(&ve); err != nil {
173 0 : fmt.Fprintf(stdout, "%s\n", err)
174 0 : break
175 : }
176 :
177 1 : if comparer != nil && !anyOverlap(comparer.Compare, &ve, m.filterStart, m.filterEnd) {
178 1 : continue
179 : }
180 :
181 1 : empty := true
182 1 : fmt.Fprintf(stdout, "%d/%d\n", offset, editIdx)
183 1 : if ve.ComparerName != "" {
184 1 : empty = false
185 1 : fmt.Fprintf(stdout, " comparer: %s", ve.ComparerName)
186 1 : comparer = m.comparers[ve.ComparerName]
187 1 : if comparer == nil {
188 0 : fmt.Fprintf(stdout, " (unknown)")
189 0 : }
190 1 : fmt.Fprintf(stdout, "\n")
191 1 : m.fmtKey.setForComparer(ve.ComparerName, m.comparers)
192 : }
193 1 : if ve.MinUnflushedLogNum != 0 {
194 1 : empty = false
195 1 : fmt.Fprintf(stdout, " log-num: %d\n", ve.MinUnflushedLogNum)
196 1 : }
197 1 : if ve.ObsoletePrevLogNum != 0 {
198 0 : empty = false
199 0 : fmt.Fprintf(stdout, " prev-log-num: %d\n", ve.ObsoletePrevLogNum)
200 0 : }
201 1 : if ve.NextFileNum != 0 {
202 1 : empty = false
203 1 : fmt.Fprintf(stdout, " next-file-num: %d\n", ve.NextFileNum)
204 1 : }
205 1 : if ve.LastSeqNum != 0 {
206 1 : empty = false
207 1 : fmt.Fprintf(stdout, " last-seq-num: %d\n", ve.LastSeqNum)
208 1 : }
209 1 : entries := make([]manifest.DeletedFileEntry, 0, len(ve.DeletedFiles))
210 1 : for df := range ve.DeletedFiles {
211 1 : empty = false
212 1 : entries = append(entries, df)
213 1 : }
214 1 : slices.SortFunc(entries, func(a, b manifest.DeletedFileEntry) int {
215 1 : if v := cmp.Compare(a.Level, b.Level); v != 0 {
216 1 : return v
217 1 : }
218 1 : return cmp.Compare(a.FileNum, b.FileNum)
219 : })
220 1 : for _, df := range entries {
221 1 : fmt.Fprintf(stdout, " deleted: L%d %s\n", df.Level, df.FileNum)
222 1 : }
223 1 : for _, nf := range ve.NewFiles {
224 1 : empty = false
225 1 : fmt.Fprintf(stdout, " added: L%d %s:%d",
226 1 : nf.Level, nf.Meta.FileNum, nf.Meta.Size)
227 1 : formatSeqNumRange(stdout, nf.Meta.SmallestSeqNum, nf.Meta.LargestSeqNum)
228 1 : formatKeyRange(stdout, m.fmtKey, &nf.Meta.Smallest, &nf.Meta.Largest)
229 1 : if nf.Meta.CreationTime != 0 {
230 1 : fmt.Fprintf(stdout, " (%s)",
231 1 : time.Unix(nf.Meta.CreationTime, 0).UTC().Format(time.RFC3339))
232 1 : }
233 1 : fmt.Fprintf(stdout, "\n")
234 : }
235 1 : if empty {
236 0 : // NB: An empty version edit can happen if we log a version edit with
237 0 : // a zero field. RocksDB does this with a version edit that contains
238 0 : // `LogNum == 0`.
239 0 : fmt.Fprintf(stdout, " <empty>\n")
240 0 : }
241 1 : editIdx++
242 : }
243 :
244 1 : if comparer != nil {
245 1 : v, err := bve.Apply(
246 1 : nil /* version */, comparer, 0,
247 1 : m.opts.Experimental.ReadCompactionRate,
248 1 : )
249 1 : if err != nil {
250 1 : fmt.Fprintf(stdout, "%s\n", err)
251 1 : return
252 1 : }
253 1 : m.printLevels(comparer.Compare, stdout, v)
254 : }
255 : }()
256 : }
257 : }
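// Each decoded edit is printed as "<record offset>/<edit index>" followed by
// whichever fields were set (comparer, log-num, prev-log-num, next-file-num,
// last-seq-num, deleted files, added files). Once the whole MANIFEST has been
// replayed, the accumulated version is printed level by level via printLevels,
// provided a comparer was recorded.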
258 :
259 1 : func anyOverlap(cmp base.Compare, ve *manifest.VersionEdit, start, end key) bool {
260 1 : if start == nil && end == nil {
261 1 : return true
262 1 : }
263 1 : for _, df := range ve.DeletedFiles {
264 1 : if anyOverlapFile(cmp, df, start, end) {
265 1 : return true
266 1 : }
267 : }
268 1 : for _, nf := range ve.NewFiles {
269 1 : if anyOverlapFile(cmp, nf.Meta, start, end) {
270 1 : return true
271 1 : }
272 : }
273 1 : return false
274 : }
275 :
276 1 : func anyOverlapFile(cmp base.Compare, f *manifest.FileMetadata, start, end key) bool {
277 1 : if f == nil {
278 1 : return true
279 1 : }
280 1 : if start != nil {
281 1 : if v := cmp(f.Largest.UserKey, start); v < 0 {
282 1 : return false
283 1 : } else if f.Largest.IsExclusiveSentinel() && v == 0 {
284 0 : return false
285 0 : }
286 : }
287 1 : if end != nil && cmp(f.Smallest.UserKey, end) >= 0 {
288 1 : return false
289 1 : }
290 1 : return true
291 : }
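// In other words, --filter-start/--filter-end behave as a half-open user-key
// interval [start, end): a file is kept when its largest key is at or after
// start (unless that largest key is an exclusive sentinel exactly equal to
// start) and its smallest key is strictly before end. A nil bound leaves that
// side unconstrained, and nil file metadata is conservatively treated as
// overlapping.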
292 :
293 1 : func (m *manifestT) runSummarize(cmd *cobra.Command, args []string) {
294 1 : for _, arg := range args {
295 1 : err := m.runSummarizeOne(cmd.OutOrStdout(), arg)
296 1 : if err != nil {
297 0 : fmt.Fprintf(cmd.OutOrStderr(), "%s\n", err)
298 0 : }
299 : }
300 : }
301 :
302 1 : func (m *manifestT) runSummarizeOne(stdout io.Writer, arg string) error {
303 1 : f, err := m.opts.FS.Open(arg)
304 1 : if err != nil {
305 0 : return err
306 0 : }
307 1 : defer f.Close()
308 1 : fmt.Fprintf(stdout, "%s\n", arg)
309 1 :
310 1 : type summaryBucket struct {
311 1 : bytesAdded [manifest.NumLevels]uint64
312 1 : bytesCompactOut [manifest.NumLevels]uint64
313 1 : bytesCompactIn [manifest.NumLevels]uint64
314 1 : filesCompactIn [manifest.NumLevels]uint64
315 1 : fileLifetimeSec [manifest.NumLevels]*hdrhistogram.Histogram
316 1 : }
317 1 : // 365 days. Arbitrary.
318 1 : const maxLifetimeSec = 365 * 24 * 60 * 60
319 1 : var (
320 1 : bve manifest.BulkVersionEdit
321 1 : newestOverall time.Time
322 1 : oldestOverall time.Time // oldest after initial version edit
323 1 : buckets = map[time.Time]*summaryBucket{}
324 1 : metadatas = map[base.FileNum]*manifest.FileMetadata{}
325 1 : )
326 1 : bve.AddedByFileNum = make(map[base.FileNum]*manifest.FileMetadata)
327 1 : rr := record.NewReader(f, 0 /* logNum */)
328 1 : numHistErrors := 0
329 1 : for i := 0; ; i++ {
330 1 : r, err := rr.Next()
331 1 : if err == io.EOF {
332 1 : break
333 1 : } else if err != nil {
334 0 : return err
335 0 : }
336 :
337 1 : var ve manifest.VersionEdit
338 1 : err = ve.Decode(r)
339 1 : if err != nil {
340 0 : return err
341 0 : }
342 1 : if err := bve.Accumulate(&ve); err != nil {
343 0 : return err
344 0 : }
345 :
346 : // !isLikelyCompaction corresponds to flushes or ingests, which are counted
347 : // in bytesAdded. This heuristic is imperfect: ingests that excise can delete
348 : // files without creating backing tables and so be counted as compactions,
349 : // while copy compactions delete no files but do create backing tables, and
350 : // so are counted as flushes/ingests.
351 : //
352 : // The bytesAdded metric overcounts since existing files virtualized by an
353 : // ingest are also included.
354 : //
355 : // TODO(sumeer): this summarization needs a rewrite. We could do that
356 : // after adding an enum to the VersionEdit to aid the summarization.
357 1 : isLikelyCompaction := len(ve.NewFiles) > 0 && len(ve.DeletedFiles) > 0 && len(ve.CreatedBackingTables) == 0
358 1 : isIntraL0Compaction := isLikelyCompaction && ve.NewFiles[0].Level == 0
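// For example (hypothetical edits): a flush that only adds L0 tables is
// counted under bytesAdded; an L3->L4 compaction both adds and deletes tables
// without creating backing tables, so its new tables count toward
// bytesCompactIn and its deleted tables toward bytesCompactOut (per input
// level, excluding the bottommost level); an intra-L0 compaction is classified
// as a compaction but excluded from both tallies below.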
359 1 : veNewest := newestOverall
360 1 : for _, nf := range ve.NewFiles {
361 1 : _, seen := metadatas[nf.Meta.FileNum]
362 1 : if seen && !isLikelyCompaction {
363 0 : // Output error and continue processing as usual.
364 0 : fmt.Fprintf(stdout, "error: flush/ingest has file that is already known %d size %s\n",
365 0 : nf.Meta.FileNum, humanize.Bytes.Uint64(nf.Meta.Size))
366 0 : }
367 1 : metadatas[nf.Meta.FileNum] = nf.Meta
368 1 : if nf.Meta.CreationTime == 0 {
369 0 : continue
370 : }
371 :
372 1 : t := time.Unix(nf.Meta.CreationTime, 0).UTC()
373 1 : if veNewest.Before(t) {
374 1 : veNewest = t
375 1 : }
376 : }
377 : // Ratchet up the most recent timestamp we've seen.
378 1 : if newestOverall.Before(veNewest) {
379 1 : newestOverall = veNewest
380 1 : }
381 :
382 1 : if i == 0 || newestOverall.IsZero() {
383 1 : continue
384 : }
385 : // Update oldestOverall once, when we encounter the first version edit
386 : // at index >= 1. It should be approximately the start time of the
387 : // manifest.
388 1 : if !newestOverall.IsZero() && oldestOverall.IsZero() {
389 1 : oldestOverall = newestOverall
390 1 : }
391 :
392 1 : bucketKey := newestOverall.Truncate(m.summarizeDur)
393 1 : b := buckets[bucketKey]
394 1 : if b == nil {
395 1 : b = &summaryBucket{}
396 1 : buckets[bucketKey] = b
397 1 : }
398 :
399 1 : for _, nf := range ve.NewFiles {
400 1 : if !isLikelyCompaction {
401 1 : b.bytesAdded[nf.Level] += nf.Meta.Size
402 1 : } else if !isIntraL0Compaction {
403 1 : b.bytesCompactIn[nf.Level] += nf.Meta.Size
404 1 : b.filesCompactIn[nf.Level]++
405 1 : }
406 : }
407 :
408 1 : for dfe := range ve.DeletedFiles {
409 1 : // Increase `bytesCompactOut` for the input level of any compactions
410 1 : // that remove bytes from a level (excluding intra-L0 compactions).
411 1 : if isLikelyCompaction && !isIntraL0Compaction && dfe.Level != manifest.NumLevels-1 {
412 1 : b.bytesCompactOut[dfe.Level] += metadatas[dfe.FileNum].Size
413 1 : }
414 1 : meta, ok := metadatas[dfe.FileNum]
415 1 : if m.verbose && ok && meta.CreationTime > 0 {
416 0 : hist := b.fileLifetimeSec[dfe.Level]
417 0 : if hist == nil {
418 0 : hist = hdrhistogram.New(0, maxLifetimeSec, 1)
419 0 : b.fileLifetimeSec[dfe.Level] = hist
420 0 : }
421 0 : lifetimeSec := int64((newestOverall.Sub(time.Unix(meta.CreationTime, 0).UTC())) / time.Second)
422 0 : if lifetimeSec > maxLifetimeSec {
423 0 : lifetimeSec = maxLifetimeSec
424 0 : }
425 0 : if err := hist.RecordValue(lifetimeSec); err != nil {
426 0 : numHistErrors++
427 0 : }
428 : }
429 : }
430 : }
431 :
432 1 : formatUint64 := func(v uint64, _ time.Duration) string {
433 1 : if v == 0 {
434 1 : return "."
435 1 : }
436 1 : return humanize.Bytes.Uint64(v).String()
437 : }
438 1 : formatByteRate := func(v uint64, dur time.Duration) string {
439 1 : if v == 0 {
440 1 : return "."
441 1 : }
442 1 : secs := dur.Seconds()
443 1 : if secs == 0 {
444 1 : secs = 1
445 1 : }
446 1 : return humanize.Bytes.Uint64(uint64(float64(v)/secs)).String() + "/s"
447 : }
448 1 : formatRate := func(v uint64, dur time.Duration) string {
449 1 : if v == 0 {
450 1 : return "."
451 1 : }
452 1 : secs := dur.Seconds()
453 1 : if secs == 0 {
454 1 : secs = 1
455 1 : }
456 1 : return fmt.Sprintf("%.1f/s", float64(v)/secs)
457 : }
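// All three formatters render a zero value as "." so empty cells stay quiet in
// the table below, and the rate formatters clamp a zero-length bucket to one
// second to avoid dividing by zero.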
458 :
459 1 : if newestOverall.IsZero() {
460 1 : fmt.Fprintf(stdout, "(no timestamps)\n")
461 1 : } else {
462 1 : // NB: bt begins unaligned with the bucket duration (m.summarizeDur),
463 1 : // but after the first bucket it is always aligned.
464 1 : for bi, bt := 0, oldestOverall; !bt.After(newestOverall); bi, bt = bi+1, bt.Truncate(m.summarizeDur).Add(m.summarizeDur) {
465 1 : // Truncate the start time to calculate the bucket key, and
466 1 : // retrieve the appropriate bucket.
467 1 : bk := bt.Truncate(m.summarizeDur)
468 1 : var bucket summaryBucket
469 1 : if buckets[bk] != nil {
470 1 : bucket = *buckets[bk]
471 1 : }
472 :
473 1 : if bi%10 == 0 {
474 1 : fmt.Fprintf(stdout, " ")
475 1 : fmt.Fprintf(stdout, "_______L0_______L1_______L2_______L3_______L4_______L5_______L6_____TOTAL\n")
476 1 : }
477 1 : fmt.Fprintf(stdout, "%s\n", bt.Format(time.RFC3339))
478 1 :
479 1 : // Compute the bucket duration. It may be shorter than `m.summarizeDur`
480 1 : // if this is the first or last bucket.
481 1 : bucketEnd := bt.Truncate(m.summarizeDur).Add(m.summarizeDur)
482 1 : if bucketEnd.After(newestOverall) {
483 1 : bucketEnd = newestOverall
484 1 : }
485 1 : dur := bucketEnd.Sub(bt)
486 1 :
487 1 : stats := []struct {
488 1 : label string
489 1 : format func(uint64, time.Duration) string
490 1 : vals [manifest.NumLevels]uint64
491 1 : }{
492 1 : {"Ingest+Flush Bytes", formatUint64, bucket.bytesAdded},
493 1 : {"Ingest+Flush Bytes/s", formatByteRate, bucket.bytesAdded},
494 1 : {"Compact Out Bytes", formatUint64, bucket.bytesCompactOut},
495 1 : {"Compact Out Bytes/s", formatByteRate, bucket.bytesCompactOut},
496 1 : {"Compact In Bytes/s", formatByteRate, bucket.bytesCompactIn},
497 1 : {"Compact In Files/s", formatRate, bucket.filesCompactIn},
498 1 : }
499 1 : for _, stat := range stats {
500 1 : var sum uint64
501 1 : for _, v := range stat.vals {
502 1 : sum += v
503 1 : }
504 1 : fmt.Fprintf(stdout, "%23s %8s %8s %8s %8s %8s %8s %8s %8s\n",
505 1 : stat.label,
506 1 : stat.format(stat.vals[0], dur),
507 1 : stat.format(stat.vals[1], dur),
508 1 : stat.format(stat.vals[2], dur),
509 1 : stat.format(stat.vals[3], dur),
510 1 : stat.format(stat.vals[4], dur),
511 1 : stat.format(stat.vals[5], dur),
512 1 : stat.format(stat.vals[6], dur),
513 1 : stat.format(sum, dur))
514 : }
515 : }
516 1 : fmt.Fprintf(stdout, "%s\n", newestOverall.Format(time.RFC3339))
517 1 :
518 1 : formatSec := func(sec int64) string {
519 0 : return (time.Second * time.Duration(sec)).String()
520 0 : }
521 1 : if m.verbose {
522 0 : fmt.Fprintf(stdout, "\nLifetime histograms\n")
523 0 : for bi, bt := 0, oldestOverall; !bt.After(newestOverall); bi, bt = bi+1, bt.Truncate(m.summarizeDur).Add(m.summarizeDur) {
524 0 : // Truncate the start time to calculate the bucket key, and
525 0 : // retrieve the appropriate bucket.
526 0 : bk := bt.Truncate(m.summarizeDur)
527 0 : var bucket summaryBucket
528 0 : if buckets[bk] != nil {
529 0 : bucket = *buckets[bk]
530 0 : }
531 0 : fmt.Fprintf(stdout, "%s\n", bt.Format(time.RFC3339))
532 0 : formatHist := func(level int, hist *hdrhistogram.Histogram) {
533 0 : if hist == nil {
534 0 : return
535 0 : }
536 0 : fmt.Fprintf(stdout, " L%d: mean: %s p25: %s p50: %s p75: %s p90: %s\n", level,
537 0 : formatSec(int64(hist.Mean())), formatSec(hist.ValueAtPercentile(25)),
538 0 : formatSec(hist.ValueAtPercentile(50)), formatSec(hist.ValueAtPercentile(75)),
539 0 : formatSec(hist.ValueAtPercentile(90)))
540 : }
541 0 : for i := range bucket.fileLifetimeSec {
542 0 : formatHist(i, bucket.fileLifetimeSec[i])
543 0 : }
544 : }
545 0 : fmt.Fprintf(stdout, "%s\n", newestOverall.Format(time.RFC3339))
546 : }
547 : }
548 :
549 1 : dur := newestOverall.Sub(oldestOverall)
550 1 : fmt.Fprintf(stdout, "---\n")
551 1 : fmt.Fprintf(stdout, "Estimated start time: %s\n", oldestOverall.Format(time.RFC3339))
552 1 : fmt.Fprintf(stdout, "Estimated end time: %s\n", newestOverall.Format(time.RFC3339))
553 1 : fmt.Fprintf(stdout, "Estimated duration: %s\n", dur.String())
554 1 : if numHistErrors > 0 {
555 0 : fmt.Fprintf(stdout, "Errors in lifetime histograms: %d\n", numHistErrors)
556 0 : }
557 :
558 1 : return nil
559 : }
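// The bucketing above leans on time.Truncate: every creation timestamp inside
// the same summarizeDur-wide window maps to the same bucket key. A minimal
// standalone sketch of that idea (illustrative only, stdlib only):
//
//	func bucketKey(t time.Time, dur time.Duration) time.Time {
//		// Truncate rounds t down to a multiple of dur since the zero time,
//		// so e.g. 12:47:03Z with dur=1h maps to 12:00:00Z.
//		return t.Truncate(dur)
//	}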
560 :
561 1 : func (m *manifestT) runCheck(cmd *cobra.Command, args []string) {
562 1 : stdout, stderr := cmd.OutOrStdout(), cmd.OutOrStderr()
563 1 : ok := true
564 1 : for _, arg := range args {
565 1 : func() {
566 1 : f, err := m.opts.FS.Open(arg)
567 1 : if err != nil {
568 1 : fmt.Fprintf(stderr, "%s\n", err)
569 1 : ok = false
570 1 : return
571 1 : }
572 1 : defer f.Close()
573 1 :
574 1 : var v *manifest.Version
575 1 : var cmp *base.Comparer
576 1 : rr := record.NewReader(f, 0 /* logNum */)
577 1 : // Contains the FileMetadata needed by BulkVersionEdit.Apply.
578 1 : // It accumulates the additions, since later edits contain
579 1 : // deletions of files that earlier edits added.
580 1 : addedByFileNum := make(map[base.FileNum]*manifest.FileMetadata)
581 1 : for {
582 1 : offset := rr.Offset()
583 1 : r, err := rr.Next()
584 1 : if err != nil {
585 1 : if err == io.EOF {
586 1 : break
587 : }
588 0 : fmt.Fprintf(stdout, "%s: offset: %d err: %s\n", arg, offset, err)
589 0 : ok = false
590 0 : break
591 : }
592 :
593 1 : var ve manifest.VersionEdit
594 1 : err = ve.Decode(r)
595 1 : if err != nil {
596 0 : fmt.Fprintf(stdout, "%s: offset: %d err: %s\n", arg, offset, err)
597 0 : ok = false
598 0 : break
599 : }
600 1 : var bve manifest.BulkVersionEdit
601 1 : bve.AddedByFileNum = addedByFileNum
602 1 : if err := bve.Accumulate(&ve); err != nil {
603 0 : fmt.Fprintf(stderr, "%s\n", err)
604 0 : ok = false
605 0 : return
606 0 : }
607 :
608 1 : empty := true
609 1 : if ve.ComparerName != "" {
610 1 : empty = false
611 1 : cmp = m.comparers[ve.ComparerName]
612 1 : if cmp == nil {
613 0 : fmt.Fprintf(stdout, "%s: offset: %d comparer %s not found\n",
614 0 : arg, offset, ve.ComparerName)
615 0 : ok = false
616 0 : break
617 : }
618 1 : m.fmtKey.setForComparer(ve.ComparerName, m.comparers)
619 : }
620 1 : empty = empty && ve.MinUnflushedLogNum == 0 && ve.ObsoletePrevLogNum == 0 &&
621 1 : ve.LastSeqNum == 0 && len(ve.DeletedFiles) == 0 &&
622 1 : len(ve.NewFiles) == 0
623 1 : if empty {
624 0 : continue
625 : }
626 : // TODO(sbhola): add option to Apply that reports all errors instead of
627 : // one error.
628 1 : newv, err := bve.Apply(v, cmp, 0, m.opts.Experimental.ReadCompactionRate)
629 1 : if err != nil {
630 1 : fmt.Fprintf(stdout, "%s: offset: %d err: %s\n",
631 1 : arg, offset, err)
632 1 : fmt.Fprintf(stdout, "Version state before failed Apply\n")
633 1 : m.printLevels(cmp.Compare, stdout, v)
634 1 : fmt.Fprintf(stdout, "Version edit that failed\n")
635 1 : for df := range ve.DeletedFiles {
636 0 : fmt.Fprintf(stdout, " deleted: L%d %s\n", df.Level, df.FileNum)
637 0 : }
638 1 : for _, nf := range ve.NewFiles {
639 1 : fmt.Fprintf(stdout, " added: L%d %s:%d",
640 1 : nf.Level, nf.Meta.FileNum, nf.Meta.Size)
641 1 : formatSeqNumRange(stdout, nf.Meta.SmallestSeqNum, nf.Meta.LargestSeqNum)
642 1 : formatKeyRange(stdout, m.fmtKey, &nf.Meta.Smallest, &nf.Meta.Largest)
643 1 : fmt.Fprintf(stdout, "\n")
644 1 : }
645 1 : ok = false
646 1 : break
647 : }
648 1 : v = newv
649 : }
650 : }()
651 : }
652 1 : if ok {
653 1 : fmt.Fprintf(stdout, "OK\n")
654 1 : }
655 : }
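// Taken together, runCheck replays every version edit through
// BulkVersionEdit.Accumulate and Apply, stopping at the first edit that fails
// to apply; when that happens it prints the version state preceding the failed
// edit along with the edit's own additions and deletions, and it prints "OK"
// only if every supplied MANIFEST replays cleanly.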