Line data Source code
1 : // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package tool
6 :
7 : import (
8 : "context"
9 : "fmt"
10 : "io"
11 : "text/tabwriter"
12 :
13 : "github.com/cockroachdb/errors"
14 : "github.com/cockroachdb/errors/oserror"
15 : "github.com/cockroachdb/pebble"
16 : "github.com/cockroachdb/pebble/internal/base"
17 : "github.com/cockroachdb/pebble/internal/humanize"
18 : "github.com/cockroachdb/pebble/internal/manifest"
19 : "github.com/cockroachdb/pebble/objstorage"
20 : "github.com/cockroachdb/pebble/objstorage/objstorageprovider"
21 : "github.com/cockroachdb/pebble/record"
22 : "github.com/cockroachdb/pebble/sstable"
23 : "github.com/cockroachdb/pebble/tool/logs"
24 : "github.com/spf13/cobra"
25 : )
26 :
27 : // dbT implements db-level tools, including both configuration state and the
28 : // commands themselves.
29 : type dbT struct {
30 : Root *cobra.Command
31 : Check *cobra.Command
32 : Checkpoint *cobra.Command
33 : Get *cobra.Command
34 : Logs *cobra.Command
35 : LSM *cobra.Command
36 : Properties *cobra.Command
37 : Scan *cobra.Command
38 : Set *cobra.Command
39 : Space *cobra.Command
40 : IOBench *cobra.Command
41 :
42 : // Configuration.
43 : opts *pebble.Options
44 : comparers sstable.Comparers
45 : mergers sstable.Mergers
46 : openErrEnhancer func(error) error
47 :
48 : // Flags.
49 : comparerName string
50 : mergerName string
51 : fmtKey keyFormatter
52 : fmtValue valueFormatter
53 : start key
54 : end key
55 : count int64
56 : allLevels bool
57 : ioCount int
58 : ioParallelism int
59 : ioSizes string
60 : verbose bool
61 : }
62 :
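// newDB builds the db tool: it constructs each cobra command, registers the
// shared and per-command flags, and records the configuration (options,
// comparers, mergers, and the optional open-error enhancer) used when a
// database is opened.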
63 : func newDB(
64 : opts *pebble.Options,
65 : comparers sstable.Comparers,
66 : mergers sstable.Mergers,
67 : openErrEnhancer func(error) error,
68 1 : ) *dbT {
69 1 : d := &dbT{
70 1 : opts: opts,
71 1 : comparers: comparers,
72 1 : mergers: mergers,
73 1 : openErrEnhancer: openErrEnhancer,
74 1 : }
75 1 : d.fmtKey.mustSet("quoted")
76 1 : d.fmtValue.mustSet("[%x]")
77 1 :
78 1 : d.Root = &cobra.Command{
79 1 : Use: "db",
80 1 : Short: "DB introspection tools",
81 1 : }
82 1 : d.Check = &cobra.Command{
83 1 : Use: "check <dir>",
84 1 : Short: "verify checksums and metadata",
85 1 : Long: `
86 1 : Verify sstable, manifest, and WAL checksums. Requires that the specified
87 1 : database not be in use by another process.
88 1 : `,
89 1 : Args: cobra.ExactArgs(1),
90 1 : Run: d.runCheck,
91 1 : }
92 1 : d.Checkpoint = &cobra.Command{
93 1 : Use: "checkpoint <src-dir> <dest-dir>",
94 1 : Short: "create a checkpoint",
95 1 : Long: `
96 1 : Creates a Pebble checkpoint in the specified destination directory. A checkpoint
97 1 : is a point-in-time snapshot of DB state. Requires that the specified
98 1 : database not be in use by another process.
99 1 : `,
100 1 : Args: cobra.ExactArgs(2),
101 1 : Run: d.runCheckpoint,
102 1 : }
103 1 : d.Get = &cobra.Command{
104 1 : Use: "get <dir> <key>",
105 1 : Short: "get value for a key",
106 1 : Long: `
107 1 : Gets the value for a key, if it exists in the DB. Prints a "not found" error
108 1 : if the key does not exist. Requires that the specified database not be in use
109 1 : by another process.
110 1 : `,
111 1 : Args: cobra.ExactArgs(2),
112 1 : Run: d.runGet,
113 1 : }
114 1 : d.Logs = logs.NewCmd()
115 1 : d.LSM = &cobra.Command{
116 1 : Use: "lsm <dir>",
117 1 : Short: "print LSM structure",
118 1 : Long: `
119 1 : Print the structure of the LSM tree. Requires that the specified database not
120 1 : be in use by another process.
121 1 : `,
122 1 : Args: cobra.ExactArgs(1),
123 1 : Run: d.runLSM,
124 1 : }
125 1 : d.Properties = &cobra.Command{
126 1 : Use: "properties <dir>",
127 1 : Short: "print aggregated sstable properties",
128 1 : Long: `
129 1 : Print sstable properties, aggregated per level of the LSM.
130 1 : `,
131 1 : Args: cobra.ExactArgs(1),
132 1 : Run: d.runProperties,
133 1 : }
134 1 : d.Scan = &cobra.Command{
135 1 : Use: "scan <dir>",
136 1 : Short: "print db records",
137 1 : Long: `
138 1 : Print the records in the DB. Requires that the specified database not be in use
139 1 : by another process.
140 1 : `,
141 1 : Args: cobra.ExactArgs(1),
142 1 : Run: d.runScan,
143 1 : }
144 1 : d.Set = &cobra.Command{
145 1 : Use: "set <dir> <key> <value>",
146 1 : Short: "set a value for a key",
147 1 : Long: `
148 1 : Adds a new key/value pair to the DB. Requires that the specified database
149 1 : not be in use by another process.
150 1 : `,
151 1 : Args: cobra.ExactArgs(3),
152 1 : Run: d.runSet,
153 1 : }
154 1 : d.Space = &cobra.Command{
155 1 : Use: "space <dir>",
156 1 : Short: "print filesystem space used",
157 1 : Long: `
158 1 : Print the estimated filesystem space usage for the inclusive-inclusive range
159 1 : specified by --start and --end. Requires that the specified database not be in
160 1 : use by another process.
161 1 : `,
162 1 : Args: cobra.ExactArgs(1),
163 1 : Run: d.runSpace,
164 1 : }
165 1 : d.IOBench = &cobra.Command{
166 1 : Use: "io-bench <dir>",
167 1 : Short: "perform sstable IO benchmark",
168 1 : Long: `
169 1 : Run a random IO workload with various IO sizes against the sstables in the
170 1 : specified database.
171 1 : `,
172 1 : Args: cobra.ExactArgs(1),
173 1 : Run: d.runIOBench,
174 1 : }
175 1 :
176 1 : d.Root.AddCommand(d.Check, d.Checkpoint, d.Get, d.Logs, d.LSM, d.Properties, d.Scan, d.Set, d.Space, d.IOBench)
177 1 : d.Root.PersistentFlags().BoolVarP(&d.verbose, "verbose", "v", false, "verbose output")
178 1 :
179 1 : for _, cmd := range []*cobra.Command{d.Check, d.Checkpoint, d.Get, d.LSM, d.Properties, d.Scan, d.Set, d.Space} {
180 1 : cmd.Flags().StringVar(
181 1 : &d.comparerName, "comparer", "", "comparer name (use default if empty)")
182 1 : cmd.Flags().StringVar(
183 1 : &d.mergerName, "merger", "", "merger name (use default if empty)")
184 1 : }
185 :
186 1 : for _, cmd := range []*cobra.Command{d.Scan, d.Space} {
187 1 : cmd.Flags().Var(
188 1 : &d.start, "start", "start key for the range")
189 1 : cmd.Flags().Var(
190 1 : &d.end, "end", "end key for the range")
191 1 : }
192 :
193 1 : d.Scan.Flags().Var(
194 1 : &d.fmtKey, "key", "key formatter")
195 1 : for _, cmd := range []*cobra.Command{d.Scan, d.Get} {
196 1 : cmd.Flags().Var(
197 1 : &d.fmtValue, "value", "value formatter")
198 1 : }
199 :
200 1 : d.Scan.Flags().Int64Var(
201 1 : &d.count, "count", 0, "key count for scan (0 is unlimited)")
202 1 :
203 1 : d.IOBench.Flags().BoolVar(
204 1 : &d.allLevels, "all-levels", false, "if set, benchmark all levels (default is only L5/L6)")
205 1 : d.IOBench.Flags().IntVar(
206 1 : &d.ioCount, "io-count", 10000, "number of IOs (per IO size) to benchmark")
207 1 : d.IOBench.Flags().IntVar(
208 1 : &d.ioParallelism, "io-parallelism", 16, "number of goroutines issuing IO")
209 1 : d.IOBench.Flags().StringVar(
210 1 : &d.ioSizes, "io-sizes-kb", "4,16,64,128,256,512,1024", "comma-separated list of IO sizes in KB")
211 1 :
212 1 : return d
213 : }
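
// Illustrative invocations of the commands wired up above (the "pebble"
// binary name and the key values are assumptions; the subcommand and flag
// names match the definitions in newDB):
//
//   pebble db lsm /path/to/db
//   pebble db scan /path/to/db --start=a --end=z --count=10
//   pebble db io-bench /path/to/db --io-sizes-kb=4,64,512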
214 :
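// loadOptions reads any OPTIONS files found in dir and adopts their comparer
// and merger so that the database can be opened with compatible key ordering
// and merge semantics. A missing or empty directory is not treated as an
// error here; pebble.Open will surface that failure instead.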
215 1 : func (d *dbT) loadOptions(dir string) error {
216 1 : ls, err := d.opts.FS.List(dir)
217 1 : if err != nil || len(ls) == 0 {
218 1 : // NB: We don't return the error here as we prefer to return the error from
219 1 : // pebble.Open. Another way to put this is that a non-existent directory is
220 1 : // not a failure in loading the options.
221 1 : return nil
222 1 : }
223 :
224 1 : hooks := &pebble.ParseHooks{
225 1 : NewComparer: func(name string) (*pebble.Comparer, error) {
226 0 : if c := d.comparers[name]; c != nil {
227 0 : return c, nil
228 0 : }
229 0 : return nil, errors.Errorf("unknown comparer %q", errors.Safe(name))
230 : },
231 0 : NewMerger: func(name string) (*pebble.Merger, error) {
232 0 : if m := d.mergers[name]; m != nil {
233 0 : return m, nil
234 0 : }
235 0 : return nil, errors.Errorf("unknown merger %q", errors.Safe(name))
236 : },
237 0 : SkipUnknown: func(name, value string) bool {
238 0 : return true
239 0 : },
240 : }
241 :
242 : // TODO(peter): RocksDB sometimes leaves multiple OPTIONS files in
243 : // existence. We parse all of them as the comparer and merger shouldn't be
244 : // changing. We could parse only the first or the latest. Not clear if this
245 : // matters.
246 1 : var dbOpts pebble.Options
247 1 : for _, filename := range ls {
248 1 : ft, _, ok := base.ParseFilename(d.opts.FS, filename)
249 1 : if !ok {
250 1 : continue
251 : }
252 1 : switch ft {
253 1 : case base.FileTypeOptions:
254 1 : err := func() error {
255 1 : f, err := d.opts.FS.Open(d.opts.FS.PathJoin(dir, filename))
256 1 : if err != nil {
257 0 : return err
258 0 : }
259 1 : defer f.Close()
260 1 :
261 1 : data, err := io.ReadAll(f)
262 1 : if err != nil {
263 0 : return err
264 0 : }
265 :
266 1 : if err := dbOpts.Parse(string(data), hooks); err != nil {
267 1 : return err
268 1 : }
269 1 : return nil
270 : }()
271 1 : if err != nil {
272 1 : return err
273 1 : }
274 : }
275 : }
276 :
277 1 : if dbOpts.Comparer != nil {
278 1 : d.opts.Comparer = dbOpts.Comparer
279 1 : }
280 1 : if dbOpts.Merger != nil {
281 1 : d.opts.Merger = dbOpts.Merger
282 1 : }
283 1 : return nil
284 : }
285 :
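// openOption adjusts the pebble.Options used to open a database.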
286 : type openOption interface {
287 : apply(opts *pebble.Options)
288 : }
289 :
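// openDB opens the database in dir after applying the supplied openOptions.
// If opening fails and an openErrEnhancer was configured, the error is passed
// through it before being returned.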
290 1 : func (d *dbT) openDB(dir string, openOptions ...openOption) (*pebble.DB, error) {
291 1 : db, err := d.openDBInternal(dir, openOptions...)
292 1 : if err != nil {
293 1 : if d.openErrEnhancer != nil {
294 1 : err = d.openErrEnhancer(err)
295 1 : }
296 1 : return nil, err
297 : }
298 1 : return db, nil
299 : }
300 :
301 1 : func (d *dbT) openDBInternal(dir string, openOptions ...openOption) (*pebble.DB, error) {
302 1 : if err := d.loadOptions(dir); err != nil {
303 1 : return nil, errors.Wrap(err, "error loading options")
304 1 : }
305 1 : if d.comparerName != "" {
306 1 : d.opts.Comparer = d.comparers[d.comparerName]
307 1 : if d.opts.Comparer == nil {
308 1 : return nil, errors.Errorf("unknown comparer %q", errors.Safe(d.comparerName))
309 1 : }
310 : }
311 1 : if d.mergerName != "" {
312 1 : d.opts.Merger = d.mergers[d.mergerName]
313 1 : if d.opts.Merger == nil {
314 1 : return nil, errors.Errorf("unknown merger %q", errors.Safe(d.mergerName))
315 1 : }
316 : }
317 1 : opts := *d.opts
318 1 : for _, opt := range openOptions {
319 1 : opt.apply(&opts)
320 1 : }
321 1 : opts.Cache = pebble.NewCache(128 << 20 /* 128 MB */)
322 1 : defer opts.Cache.Unref()
323 1 : return pebble.Open(dir, &opts)
324 : }
325 :
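// closeDB closes db, writing any error to stderr.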
326 1 : func (d *dbT) closeDB(stderr io.Writer, db *pebble.DB) {
327 1 : if err := db.Close(); err != nil {
328 0 : fmt.Fprintf(stderr, "%s\n", err)
329 0 : }
330 : }
331 :
332 1 : func (d *dbT) runCheck(cmd *cobra.Command, args []string) {
333 1 : stdout, stderr := cmd.OutOrStdout(), cmd.ErrOrStderr()
334 1 : db, err := d.openDB(args[0])
335 1 : if err != nil {
336 1 : fmt.Fprintf(stderr, "%s\n", err)
337 1 : return
338 1 : }
339 1 : defer d.closeDB(stderr, db)
340 1 :
341 1 : var stats pebble.CheckLevelsStats
342 1 : if err := db.CheckLevels(&stats); err != nil {
343 0 : fmt.Fprintf(stderr, "%s\n", err)
344 0 : }
345 1 : fmt.Fprintf(stdout, "checked %d %s and %d %s\n",
346 1 : stats.NumPoints, makePlural("point", stats.NumPoints), stats.NumTombstones, makePlural("tombstone", int64(stats.NumTombstones)))
347 : }
348 :
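// nonReadOnly is an openOption used by the checkpoint and set commands, which
// require a writable database.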
349 : type nonReadOnly struct{}
350 :
351 1 : func (n nonReadOnly) apply(opts *pebble.Options) {
352 1 : opts.ReadOnly = false
353 1 : // Increase the L0 compaction threshold to reduce the likelihood of an
354 1 : // unintended compaction changing test output.
355 1 : opts.L0CompactionThreshold = 10
356 1 : }
357 :
358 1 : func (d *dbT) runCheckpoint(cmd *cobra.Command, args []string) {
359 1 : stderr := cmd.ErrOrStderr()
360 1 : db, err := d.openDB(args[0], nonReadOnly{})
361 1 : if err != nil {
362 0 : fmt.Fprintf(stderr, "%s\n", err)
363 0 : return
364 0 : }
365 1 : defer d.closeDB(stderr, db)
366 1 : destDir := args[1]
367 1 :
368 1 : if err := db.Checkpoint(destDir); err != nil {
369 0 : fmt.Fprintf(stderr, "%s\n", err)
370 0 : }
371 : }
372 :
373 1 : func (d *dbT) runGet(cmd *cobra.Command, args []string) {
374 1 : stdout, stderr := cmd.OutOrStdout(), cmd.ErrOrStderr()
375 1 : db, err := d.openDB(args[0])
376 1 : if err != nil {
377 0 : fmt.Fprintf(stderr, "%s\n", err)
378 0 : return
379 0 : }
380 1 : defer d.closeDB(stderr, db)
381 1 : var k key
382 1 : if err := k.Set(args[1]); err != nil {
383 0 : fmt.Fprintf(stderr, "%s\n", err)
384 0 : return
385 0 : }
386 :
387 1 : val, closer, err := db.Get(k)
388 1 : if err != nil {
389 1 : fmt.Fprintf(stderr, "%s\n", err)
390 1 : return
391 1 : }
392 1 : defer func() {
393 1 : if closer != nil {
394 1 : closer.Close()
395 1 : }
396 : }()
397 1 : if val != nil {
398 1 : fmt.Fprintf(stdout, "%s\n", d.fmtValue.fn(k, val))
399 1 : }
400 : }
401 :
402 1 : func (d *dbT) runLSM(cmd *cobra.Command, args []string) {
403 1 : stdout, stderr := cmd.OutOrStdout(), cmd.ErrOrStderr()
404 1 : db, err := d.openDB(args[0])
405 1 : if err != nil {
406 1 : fmt.Fprintf(stderr, "%s\n", err)
407 1 : return
408 1 : }
409 1 : defer d.closeDB(stderr, db)
410 1 :
411 1 : fmt.Fprintf(stdout, "%s", db.Metrics())
412 : }
413 :
414 1 : func (d *dbT) runScan(cmd *cobra.Command, args []string) {
415 1 : stdout, stderr := cmd.OutOrStdout(), cmd.ErrOrStderr()
416 1 : db, err := d.openDB(args[0])
417 1 : if err != nil {
418 1 : fmt.Fprintf(stderr, "%s\n", err)
419 1 : return
420 1 : }
421 1 : defer d.closeDB(stderr, db)
422 1 :
423 1 : // Update the internal formatters if this comparer specifies them.
424 1 : if d.opts.Comparer != nil {
425 1 : d.fmtKey.setForComparer(d.opts.Comparer.Name, d.comparers)
426 1 : d.fmtValue.setForComparer(d.opts.Comparer.Name, d.comparers)
427 1 : }
428 :
429 1 : start := timeNow()
430 1 : fmtKeys := d.fmtKey.spec != "null"
431 1 : fmtValues := d.fmtValue.spec != "null"
432 1 : var count int64
433 1 :
434 1 : iter, _ := db.NewIter(&pebble.IterOptions{
435 1 : UpperBound: d.end,
436 1 : })
437 1 : for valid := iter.SeekGE(d.start); valid; valid = iter.Next() {
438 1 : if fmtKeys || fmtValues {
439 1 : needDelimiter := false
440 1 : if fmtKeys {
441 1 : fmt.Fprintf(stdout, "%s", d.fmtKey.fn(iter.Key()))
442 1 : needDelimiter = true
443 1 : }
444 1 : if fmtValues {
445 1 : if needDelimiter {
446 1 : stdout.Write([]byte{' '})
447 1 : }
448 1 : fmt.Fprintf(stdout, "%s", d.fmtValue.fn(iter.Key(), iter.Value()))
449 : }
450 1 : stdout.Write([]byte{'\n'})
451 : }
452 :
453 1 : count++
454 1 : if d.count > 0 && count >= d.count {
455 1 : break
456 : }
457 : }
458 :
459 1 : if err := iter.Close(); err != nil {
460 0 : fmt.Fprintf(stderr, "%s\n", err)
461 0 : }
462 :
463 1 : elapsed := timeNow().Sub(start)
464 1 :
465 1 : fmt.Fprintf(stdout, "scanned %d %s in %0.1fs\n",
466 1 : count, makePlural("record", count), elapsed.Seconds())
467 : }
468 :
469 1 : func (d *dbT) runSpace(cmd *cobra.Command, args []string) {
470 1 : stdout, stderr := cmd.OutOrStdout(), cmd.ErrOrStderr()
471 1 : db, err := d.openDB(args[0])
472 1 : if err != nil {
473 0 : fmt.Fprintf(stderr, "%s\n", err)
474 0 : return
475 0 : }
476 1 : defer d.closeDB(stderr, db)
477 1 :
478 1 : bytes, err := db.EstimateDiskUsage(d.start, d.end)
479 1 : if err != nil {
480 0 : fmt.Fprintf(stderr, "%s\n", err)
481 0 : return
482 0 : }
483 1 : fmt.Fprintf(stdout, "%d\n", bytes)
484 : }
485 :
486 1 : func (d *dbT) runProperties(cmd *cobra.Command, args []string) {
487 1 : stdout, stderr := cmd.OutOrStdout(), cmd.ErrOrStderr()
488 1 : dirname := args[0]
489 1 : err := func() error {
490 1 : desc, err := pebble.Peek(dirname, d.opts.FS)
491 1 : if err != nil {
492 1 : return err
493 1 : } else if !desc.Exists {
494 0 : return oserror.ErrNotExist
495 0 : }
496 1 : manifestFilename := d.opts.FS.PathBase(desc.ManifestFilename)
497 1 :
498 1 : // Replay the manifest to get the current version.
499 1 : f, err := d.opts.FS.Open(desc.ManifestFilename)
500 1 : if err != nil {
501 0 : return errors.Wrapf(err, "pebble: could not open MANIFEST file %q", manifestFilename)
502 0 : }
503 1 : defer f.Close()
504 1 :
505 1 : cmp := base.DefaultComparer
506 1 : var bve manifest.BulkVersionEdit
507 1 : bve.AddedByFileNum = make(map[base.FileNum]*manifest.FileMetadata)
508 1 : rr := record.NewReader(f, 0 /* logNum */)
509 1 : for {
510 1 : r, err := rr.Next()
511 1 : if err == io.EOF {
512 1 : break
513 : }
514 1 : if err != nil {
515 0 : return errors.Wrapf(err, "pebble: reading manifest %q", manifestFilename)
516 0 : }
517 1 : var ve manifest.VersionEdit
518 1 : err = ve.Decode(r)
519 1 : if err != nil {
520 0 : return err
521 0 : }
522 1 : if err := bve.Accumulate(&ve); err != nil {
523 0 : return err
524 0 : }
525 1 : if ve.ComparerName != "" {
526 1 : cmp = d.comparers[ve.ComparerName]
527 1 : d.fmtKey.setForComparer(ve.ComparerName, d.comparers)
528 1 : d.fmtValue.setForComparer(ve.ComparerName, d.comparers)
529 1 : }
530 : }
531 1 : v, err := bve.Apply(
532 1 : nil /* version */, cmp, d.opts.FlushSplitBytes,
533 1 : d.opts.Experimental.ReadCompactionRate, nil, /* zombies */
534 1 : )
535 1 : if err != nil {
536 0 : return err
537 0 : }
538 :
539 1 : objProvider, err := objstorageprovider.Open(objstorageprovider.DefaultSettings(d.opts.FS, dirname))
540 1 : if err != nil {
541 0 : return err
542 0 : }
543 1 : defer objProvider.Close()
544 1 :
545 1 : // Load and aggregate sstable properties.
546 1 : tw := tabwriter.NewWriter(stdout, 2, 1, 4, ' ', 0)
547 1 : var total props
548 1 : var all []props
549 1 : for _, l := range v.Levels {
550 1 : iter := l.Iter()
551 1 : var level props
552 1 : for t := iter.First(); t != nil; t = iter.Next() {
553 1 : if t.Virtual {
554 0 : // TODO(bananabrick): Handle virtual sstables here. We don't
555 0 : // really have any stats or properties at this point. Maybe
556 0 : // we could approximate some of these properties for virtual
557 0 : // sstables by first grabbing properties for the backing
558 0 : // physical sstable, and then extrapolating.
559 0 : continue
560 : }
561 1 : err := d.addProps(objProvider, t.PhysicalMeta(), &level)
562 1 : if err != nil {
563 0 : return err
564 0 : }
565 : }
566 1 : all = append(all, level)
567 1 : total.update(level)
568 : }
569 1 : all = append(all, total)
570 1 :
571 1 : fmt.Fprintln(tw, "\tL0\tL1\tL2\tL3\tL4\tL5\tL6\tTOTAL")
572 1 :
573 1 : fmt.Fprintf(tw, "count\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
574 1 : propArgs(all, func(p *props) interface{} { return p.Count })...)
575 :
576 1 : fmt.Fprintln(tw, "seq num\t\t\t\t\t\t\t\t")
577 1 : fmt.Fprintf(tw, " smallest\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
578 1 : propArgs(all, func(p *props) interface{} { return p.SmallestSeqNum })...)
579 1 : fmt.Fprintf(tw, " largest\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
580 1 : propArgs(all, func(p *props) interface{} { return p.LargestSeqNum })...)
581 :
582 1 : fmt.Fprintln(tw, "size\t\t\t\t\t\t\t\t")
583 1 : fmt.Fprintf(tw, " data\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
584 1 : propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.DataSize) })...)
585 1 : fmt.Fprintf(tw, " blocks\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
586 1 : propArgs(all, func(p *props) interface{} { return p.NumDataBlocks })...)
587 1 : fmt.Fprintf(tw, " index\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
588 1 : propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.IndexSize) })...)
589 1 : fmt.Fprintf(tw, " blocks\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
590 1 : propArgs(all, func(p *props) interface{} { return p.NumIndexBlocks })...)
591 1 : fmt.Fprintf(tw, " top-level\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
592 1 : propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.TopLevelIndexSize) })...)
593 1 : fmt.Fprintf(tw, " filter\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
594 1 : propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.FilterSize) })...)
595 1 : fmt.Fprintf(tw, " raw-key\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
596 1 : propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.RawKeySize) })...)
597 1 : fmt.Fprintf(tw, " raw-value\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
598 1 : propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.RawValueSize) })...)
599 1 : fmt.Fprintf(tw, " pinned-key\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
600 1 : propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.SnapshotPinnedKeySize) })...)
601 1 : fmt.Fprintf(tw, " pinned-value\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
602 1 : propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.SnapshotPinnedValueSize) })...)
603 1 : fmt.Fprintf(tw, " point-del-key-size\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
604 1 : propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.RawPointTombstoneKeySize) })...)
605 1 : fmt.Fprintf(tw, " point-del-value-size\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
606 1 : propArgs(all, func(p *props) interface{} { return humanize.Bytes.Uint64(p.RawPointTombstoneValueSize) })...)
607 :
608 1 : fmt.Fprintln(tw, "records\t\t\t\t\t\t\t\t")
609 1 : fmt.Fprintf(tw, " set\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
610 1 : propArgs(all, func(p *props) interface{} {
611 1 : return humanize.Count.Uint64(p.NumEntries - p.NumDeletions - p.NumMergeOperands)
612 1 : })...)
613 1 : fmt.Fprintf(tw, " delete\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
614 1 : propArgs(all, func(p *props) interface{} { return humanize.Count.Uint64(p.NumDeletions - p.NumRangeDeletions) })...)
615 1 : fmt.Fprintf(tw, " delete-sized\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
616 1 : propArgs(all, func(p *props) interface{} { return humanize.Count.Uint64(p.NumSizedDeletions) })...)
617 1 : fmt.Fprintf(tw, " range-delete\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
618 1 : propArgs(all, func(p *props) interface{} { return humanize.Count.Uint64(p.NumRangeDeletions) })...)
619 1 : fmt.Fprintf(tw, " range-key-sets\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
620 1 : propArgs(all, func(p *props) interface{} { return humanize.Count.Uint64(p.NumRangeKeySets) })...)
621 1 : fmt.Fprintf(tw, " range-key-unsets\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
622 1 : propArgs(all, func(p *props) interface{} { return humanize.Count.Uint64(p.NumRangeKeyUnSets) })...)
623 1 : fmt.Fprintf(tw, " range-key-deletes\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
624 1 : propArgs(all, func(p *props) interface{} { return humanize.Count.Uint64(p.NumRangeKeyDeletes) })...)
625 1 : fmt.Fprintf(tw, " merge\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
626 1 : propArgs(all, func(p *props) interface{} { return humanize.Count.Uint64(p.NumMergeOperands) })...)
627 1 : fmt.Fprintf(tw, " pinned\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
628 1 : propArgs(all, func(p *props) interface{} { return humanize.Count.Uint64(p.SnapshotPinnedKeys) })...)
629 :
630 1 : if err := tw.Flush(); err != nil {
631 0 : return err
632 0 : }
633 1 : return nil
634 : }()
635 1 : if err != nil {
636 1 : fmt.Fprintln(stderr, err)
637 1 : }
638 : }
639 :
640 1 : func (d *dbT) runSet(cmd *cobra.Command, args []string) {
641 1 : stderr := cmd.ErrOrStderr()
642 1 : db, err := d.openDB(args[0], nonReadOnly{})
643 1 : if err != nil {
644 0 : fmt.Fprintf(stderr, "%s\n", err)
645 0 : return
646 0 : }
647 1 : defer d.closeDB(stderr, db)
648 1 : var k, v key
649 1 : if err := k.Set(args[1]); err != nil {
650 0 : fmt.Fprintf(stderr, "%s\n", err)
651 0 : return
652 0 : }
653 1 : if err := v.Set(args[2]); err != nil {
654 0 : fmt.Fprintf(stderr, "%s\n", err)
655 0 : return
656 0 : }
657 :
658 1 : if err := db.Set(k, v, nil); err != nil {
659 0 : fmt.Fprintf(stderr, "%s\n", err)
660 0 : }
661 : }
662 :
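// propArgs projects a single property out of each per-level props value,
// returning the results in a form suitable for passing to fmt.Fprintf.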
663 1 : func propArgs(props []props, getProp func(*props) interface{}) []interface{} {
664 1 : args := make([]interface{}, 0, len(props))
665 1 : for _, p := range props {
666 1 : args = append(args, getProp(&p))
667 1 : }
668 1 : return args
669 : }
670 :
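// props holds aggregated sstable properties, either for a single LSM level or
// for the database as a whole.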
671 : type props struct {
672 : Count uint64
673 : SmallestSeqNum uint64
674 : LargestSeqNum uint64
675 : DataSize uint64
676 : FilterSize uint64
677 : IndexSize uint64
678 : NumDataBlocks uint64
679 : NumIndexBlocks uint64
680 : NumDeletions uint64
681 : NumSizedDeletions uint64
682 : NumEntries uint64
683 : NumMergeOperands uint64
684 : NumRangeDeletions uint64
685 : NumRangeKeySets uint64
686 : NumRangeKeyUnSets uint64
687 : NumRangeKeyDeletes uint64
688 : RawKeySize uint64
689 : RawPointTombstoneKeySize uint64
690 : RawPointTombstoneValueSize uint64
691 : RawValueSize uint64
692 : SnapshotPinnedKeys uint64
693 : SnapshotPinnedKeySize uint64
694 : SnapshotPinnedValueSize uint64
695 : TopLevelIndexSize uint64
696 : }
697 :
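// update accumulates the properties in o into p, summing counts and sizes and
// tracking the smallest and largest sequence numbers seen.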
698 1 : func (p *props) update(o props) {
699 1 : p.Count += o.Count
700 1 : if o.SmallestSeqNum != 0 && (o.SmallestSeqNum < p.SmallestSeqNum || p.SmallestSeqNum == 0) {
701 1 : p.SmallestSeqNum = o.SmallestSeqNum
702 1 : }
703 1 : if o.LargestSeqNum > p.LargestSeqNum {
704 1 : p.LargestSeqNum = o.LargestSeqNum
705 1 : }
706 1 : p.DataSize += o.DataSize
707 1 : p.FilterSize += o.FilterSize
708 1 : p.IndexSize += o.IndexSize
709 1 : p.NumDataBlocks += o.NumDataBlocks
710 1 : p.NumIndexBlocks += o.NumIndexBlocks
711 1 : p.NumDeletions += o.NumDeletions
712 1 : p.NumSizedDeletions += o.NumSizedDeletions
713 1 : p.NumEntries += o.NumEntries
714 1 : p.NumMergeOperands += o.NumMergeOperands
715 1 : p.NumRangeDeletions += o.NumRangeDeletions
716 1 : p.NumRangeKeySets += o.NumRangeKeySets
717 1 : p.NumRangeKeyUnSets += o.NumRangeKeyUnSets
718 1 : p.NumRangeKeyDeletes += o.NumRangeKeyDeletes
719 1 : p.RawKeySize += o.RawKeySize
720 1 : p.RawPointTombstoneKeySize += o.RawPointTombstoneKeySize
721 1 : p.RawPointTombstoneValueSize += o.RawPointTombstoneValueSize
722 1 : p.RawValueSize += o.RawValueSize
723 1 : p.SnapshotPinnedKeySize += o.SnapshotPinnedKeySize
724 1 : p.SnapshotPinnedValueSize += o.SnapshotPinnedValueSize
725 1 : p.SnapshotPinnedKeys += o.SnapshotPinnedKeys
726 1 : p.TopLevelIndexSize += o.TopLevelIndexSize
727 : }
728 :
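// addProps opens the physical sstable described by m through objProvider,
// reads its properties, and folds them into p.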
729 : func (d *dbT) addProps(
730 : objProvider objstorage.Provider, m manifest.PhysicalFileMeta, p *props,
731 1 : ) error {
732 1 : ctx := context.Background()
733 1 : f, err := objProvider.OpenForReading(ctx, base.FileTypeTable, m.FileBacking.DiskFileNum, objstorage.OpenOptions{})
734 1 : if err != nil {
735 0 : return err
736 0 : }
737 1 : r, err := sstable.NewReader(f, sstable.ReaderOptions{}, d.mergers, d.comparers)
738 1 : if err != nil {
739 0 : _ = f.Close()
740 0 : return err
741 0 : }
742 1 : p.update(props{
743 1 : Count: 1,
744 1 : SmallestSeqNum: m.SmallestSeqNum,
745 1 : LargestSeqNum: m.LargestSeqNum,
746 1 : DataSize: r.Properties.DataSize,
747 1 : FilterSize: r.Properties.FilterSize,
748 1 : IndexSize: r.Properties.IndexSize,
749 1 : NumDataBlocks: r.Properties.NumDataBlocks,
750 1 : NumIndexBlocks: 1 + r.Properties.IndexPartitions,
751 1 : NumDeletions: r.Properties.NumDeletions,
752 1 : NumSizedDeletions: r.Properties.NumSizedDeletions,
753 1 : NumEntries: r.Properties.NumEntries,
754 1 : NumMergeOperands: r.Properties.NumMergeOperands,
755 1 : NumRangeDeletions: r.Properties.NumRangeDeletions,
756 1 : NumRangeKeySets: r.Properties.NumRangeKeySets,
757 1 : NumRangeKeyUnSets: r.Properties.NumRangeKeyUnsets,
758 1 : NumRangeKeyDeletes: r.Properties.NumRangeKeyDels,
759 1 : RawKeySize: r.Properties.RawKeySize,
760 1 : RawPointTombstoneKeySize: r.Properties.RawPointTombstoneKeySize,
761 1 : RawPointTombstoneValueSize: r.Properties.RawPointTombstoneValueSize,
762 1 : RawValueSize: r.Properties.RawValueSize,
763 1 : SnapshotPinnedKeySize: r.Properties.SnapshotPinnedKeySize,
764 1 : SnapshotPinnedValueSize: r.Properties.SnapshotPinnedValueSize,
765 1 : SnapshotPinnedKeys: r.Properties.SnapshotPinnedKeys,
766 1 : TopLevelIndexSize: r.Properties.TopLevelIndexSize,
767 1 : })
768 1 : return r.Close()
769 : }
770 :
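// makePlural appends "s" to singular when count is greater than one.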
771 1 : func makePlural(singular string, count int64) string {
772 1 : if count > 1 {
773 1 : return fmt.Sprintf("%ss", singular)
774 1 : }
775 1 : return singular
776 : }
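
// A minimal sketch, assuming a caller inside this package that already has a
// pebble.Options and comparer/merger registries in hand (the variable names
// below are hypothetical), of how the command tree built by newDB might be
// mounted on a root cobra command:
//
//   root := &cobra.Command{Use: "pebble"}
//   t := newDB(opts, comparers, mergers, nil /* openErrEnhancer */)
//   root.AddCommand(t.Root)
//   _ = root.Execute()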