Line data Source code
1 : // Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package tool
6 :
7 : import (
8 : "bytes"
9 : "cmp"
10 : "fmt"
11 : "io"
12 : "slices"
13 : "sort"
14 :
15 : "github.com/cockroachdb/pebble"
16 : "github.com/cockroachdb/pebble/internal/base"
17 : "github.com/cockroachdb/pebble/internal/keyspan"
18 : "github.com/cockroachdb/pebble/internal/manifest"
19 : "github.com/cockroachdb/pebble/internal/private"
20 : "github.com/cockroachdb/pebble/internal/rangedel"
21 : "github.com/cockroachdb/pebble/record"
22 : "github.com/cockroachdb/pebble/sstable"
23 : "github.com/spf13/cobra"
24 : )
25 :
// findRef records a single reference to the search key: the internal key and
// value as found in a WAL or sstable, and the file that contained it.
type findRef struct {
	// key is the internal key (user key, seqnum, kind) of the reference.
	key base.InternalKey
	// value is a copy of the value associated with key (empty for
	// deletion entries).
	value []byte
	// fileNum identifies the WAL or sstable file that contained the
	// reference.
	fileNum base.FileNum
}
31 :
// findT implements the find tool.
//
// TODO(bananabrick): Add support for virtual sstables in this tool. Currently,
// the tool will work because we're parsing files from disk, so virtual sstables
// will never be added to findT.tables. The manifest could contain information
// about virtual sstables. This is fine because the manifest is only used to
// compute the findT.editRefs, and editRefs is only used if a file in
// findT.tables contains a key. Of course, the tool won't be completely
// accurate without dealing with virtual sstable case.
type findT struct {
	// Root is the cobra command for "find", constructed by newFind.
	Root *cobra.Command

	// Configuration.
	opts      *pebble.Options
	comparers sstable.Comparers
	mergers   sstable.Mergers

	// Flags.
	comparerName string
	fmtKey       keyFormatter
	fmtValue     valueFormatter
	verbose      bool

	// Map from file num to path on disk.
	files map[base.DiskFileNum]string
	// Map from file num to version edit index which references the file num.
	editRefs map[base.FileNum][]int
	// List of version edits.
	edits []manifest.VersionEdit
	// Sorted list of WAL file nums.
	logs []base.DiskFileNum
	// Sorted list of manifest file nums.
	manifests []base.DiskFileNum
	// Sorted list of table file nums.
	tables []base.FileNum
	// Set of tables that contains references to the search key.
	tableRefs map[base.FileNum]bool
	// Map from file num to table metadata.
	tableMeta map[base.FileNum]*manifest.FileMetadata
	// List of error messages for SSTables that could not be decoded.
	errors []string
}
74 :
75 : func newFind(
76 : opts *pebble.Options,
77 : comparers sstable.Comparers,
78 : defaultComparer string,
79 : mergers sstable.Mergers,
80 1 : ) *findT {
81 1 : f := &findT{
82 1 : opts: opts,
83 1 : comparers: comparers,
84 1 : mergers: mergers,
85 1 : }
86 1 : f.fmtKey.mustSet("quoted")
87 1 : f.fmtValue.mustSet("[%x]")
88 1 :
89 1 : f.Root = &cobra.Command{
90 1 : Use: "find <dir> <key>",
91 1 : Short: "find references to the specified key",
92 1 : Long: `
93 1 : Find references to the specified key and any range tombstones that contain the
94 1 : key. This includes references to the key in WAL files and sstables, and the
95 1 : provenance of the sstables (flushed, ingested, compacted).
96 1 : `,
97 1 : Args: cobra.ExactArgs(2),
98 1 : Run: f.run,
99 1 : }
100 1 :
101 1 : f.Root.Flags().BoolVarP(
102 1 : &f.verbose, "verbose", "v", false, "verbose output")
103 1 : f.Root.Flags().StringVar(
104 1 : &f.comparerName, "comparer", defaultComparer, "comparer name")
105 1 : f.Root.Flags().Var(
106 1 : &f.fmtKey, "key", "key formatter")
107 1 : f.Root.Flags().Var(
108 1 : &f.fmtValue, "value", "value formatter")
109 1 : return f
110 1 : }
111 :
// run implements the find command: it parses the search key (args[1]),
// locates the database files under the directory (args[0]), reads the
// manifests, searches logs and sstables for the key, and prints each
// reference grouped by file along with the file's key range and provenance.
func (f *findT) run(cmd *cobra.Command, args []string) {
	stdout, stderr := cmd.OutOrStdout(), cmd.OutOrStderr()
	var key key
	if err := key.Set(args[1]); err != nil {
		fmt.Fprintf(stdout, "%s\n", err)
		return
	}

	if err := f.findFiles(stdout, stderr, args[0]); err != nil {
		fmt.Fprintf(stdout, "%s\n", err)
		return
	}
	f.readManifests(stdout)

	// readManifests may have overridden f.comparerName from a version edit;
	// resolve the comparer and configure the formatters to match it.
	f.opts.Comparer = f.comparers[f.comparerName]
	if f.opts.Comparer == nil {
		fmt.Fprintf(stderr, "unknown comparer %q", f.comparerName)
		return
	}
	f.fmtKey.setForComparer(f.opts.Comparer.Name, f.comparers)
	f.fmtValue.setForComparer(f.opts.Comparer.Name, f.comparers)

	refs := f.search(stdout, key)
	var lastFileNum base.FileNum
	for i := range refs {
		r := &refs[i]
		// Print a per-file header (path, key range, provenance) the first
		// time a reference from a new file is seen. NOTE(review): the first
		// header relies on lastFileNum's zero value differing from real
		// file nums — assumes file num 0 is never an actual file.
		if lastFileNum != r.fileNum {
			lastFileNum = r.fileNum
			fmt.Fprintf(stdout, "%s", f.opts.FS.PathBase(f.files[r.fileNum.DiskFileNum()]))
			if m := f.tableMeta[r.fileNum]; m != nil {
				fmt.Fprintf(stdout, " ")
				formatKeyRange(stdout, f.fmtKey, &m.Smallest, &m.Largest)
			}
			fmt.Fprintf(stdout, "\n")
			if p := f.tableProvenance(r.fileNum); p != "" {
				fmt.Fprintf(stdout, "    (%s)\n", p)
			}
		}
		fmt.Fprintf(stdout, "    ")
		formatKeyValue(stdout, f.fmtKey, f.fmtValue, &r.key, r.value)
	}

	// Report any sstables that could not be decoded during the search.
	for _, errorMsg := range f.errors {
		fmt.Fprint(stdout, errorMsg)
	}
}
158 :
159 : // Find all of the manifests, logs, and tables in the specified directory.
160 1 : func (f *findT) findFiles(stdout, stderr io.Writer, dir string) error {
161 1 : f.files = make(map[base.DiskFileNum]string)
162 1 : f.editRefs = make(map[base.FileNum][]int)
163 1 : f.logs = nil
164 1 : f.manifests = nil
165 1 : f.tables = nil
166 1 : f.tableMeta = make(map[base.FileNum]*manifest.FileMetadata)
167 1 :
168 1 : if _, err := f.opts.FS.Stat(dir); err != nil {
169 1 : return err
170 1 : }
171 :
172 1 : walk(stderr, f.opts.FS, dir, func(path string) {
173 1 : ft, fileNum, ok := base.ParseFilename(f.opts.FS, path)
174 1 : if !ok {
175 1 : return
176 1 : }
177 1 : switch ft {
178 1 : case base.FileTypeLog:
179 1 : f.logs = append(f.logs, fileNum)
180 1 : case base.FileTypeManifest:
181 1 : f.manifests = append(f.manifests, fileNum)
182 1 : case base.FileTypeTable:
183 1 : f.tables = append(f.tables, fileNum.FileNum())
184 1 : default:
185 1 : return
186 : }
187 1 : f.files[fileNum] = path
188 : })
189 :
190 1 : slices.Sort(f.logs)
191 1 : slices.Sort(f.manifests)
192 1 : slices.Sort(f.tables)
193 1 :
194 1 : if f.verbose {
195 1 : fmt.Fprintf(stdout, "%s\n", dir)
196 1 : fmt.Fprintf(stdout, "%5d %s\n", len(f.manifests), makePlural("manifest", int64(len(f.manifests))))
197 1 : fmt.Fprintf(stdout, "%5d %s\n", len(f.logs), makePlural("log", int64(len(f.logs))))
198 1 : fmt.Fprintf(stdout, "%5d %s\n", len(f.tables), makePlural("sstable", int64(len(f.tables))))
199 1 : }
200 1 : return nil
201 : }
202 :
// Read the manifests and populate the editRefs map which is used to determine
// the provenance and metadata of tables.
func (f *findT) readManifests(stdout io.Writer) {
	for _, fileNum := range f.manifests {
		// Each manifest is handled in a closure so the deferred Close runs
		// per-file rather than accumulating until function exit.
		func() {
			path := f.files[fileNum]
			mf, err := f.opts.FS.Open(path)
			if err != nil {
				fmt.Fprintf(stdout, "%s\n", err)
				return
			}
			defer mf.Close()

			if f.verbose {
				fmt.Fprintf(stdout, "%s\n", path)
			}

			rr := record.NewReader(mf, 0 /* logNum */)
			for {
				r, err := rr.Next()
				if err != nil {
					// io.EOF is the normal end of a manifest; anything else
					// is reported before abandoning this file.
					if err != io.EOF {
						fmt.Fprintf(stdout, "%s: %s\n", path, err)
					}
					break
				}

				var ve manifest.VersionEdit
				if err := ve.Decode(r); err != nil {
					fmt.Fprintf(stdout, "%s: %s\n", path, err)
					break
				}
				// i is the index of this edit within the global f.edits list;
				// editRefs entries below point back at it.
				i := len(f.edits)
				f.edits = append(f.edits, ve)

				// Later edits win: the last comparer name seen is used.
				if ve.ComparerName != "" {
					f.comparerName = ve.ComparerName
				}
				// A min-unflushed log num ties the referenced WAL to this
				// edit (the edit is the flush of that log).
				if num := ve.MinUnflushedLogNum; num != 0 {
					f.editRefs[num.FileNum()] = append(f.editRefs[num.FileNum()], i)
				}
				for df := range ve.DeletedFiles {
					f.editRefs[df.FileNum] = append(f.editRefs[df.FileNum], i)
				}
				for _, nf := range ve.NewFiles {
					// The same file can be deleted and added in a single version edit
					// which indicates a "move" compaction. Only add the edit to the list
					// once.
					refs := f.editRefs[nf.Meta.FileNum]
					if n := len(refs); n == 0 || refs[n-1] != i {
						f.editRefs[nf.Meta.FileNum] = append(refs, i)
					}
					// Record the first metadata seen for each table.
					if _, ok := f.tableMeta[nf.Meta.FileNum]; !ok {
						f.tableMeta[nf.Meta.FileNum] = nf.Meta
					}
				}
			}
		}()
	}

	if f.verbose {
		fmt.Fprintf(stdout, "%5d %s\n", len(f.edits), makePlural("edit", int64(len(f.edits))))
	}
}
267 :
268 : // Search the logs and sstables for references to the specified key.
269 1 : func (f *findT) search(stdout io.Writer, key []byte) []findRef {
270 1 : refs := f.searchLogs(stdout, key, nil)
271 1 : refs = f.searchTables(stdout, key, refs)
272 1 :
273 1 : // For a given file (log or table) the references are already in the correct
274 1 : // order. We simply want to order the references by fileNum using a stable
275 1 : // sort.
276 1 : //
277 1 : // TODO(peter): I'm not sure if this is perfectly correct with regards to log
278 1 : // files and ingested sstables, but it is close enough and doing something
279 1 : // better is onerous. Revisit if this ever becomes problematic (e.g. if we
280 1 : // allow finding more than one key at a time).
281 1 : //
282 1 : // An example of the problem with logs and ingestion (which can only occur
283 1 : // with distinct keys). If I write key "a" to a log, I can then ingested key
284 1 : // "b" without causing "a" to be flushed. Then I can write key "c" to the
285 1 : // log. Ideally, we'd show the key "a" from the log, then the key "b" from
286 1 : // the ingested sstable, then key "c" from the log.
287 1 : slices.SortStableFunc(refs, func(a, b findRef) int {
288 1 : return cmp.Compare(a.fileNum, b.fileNum)
289 1 : })
290 1 : return refs
291 : }
292 :
293 : // Search the logs for references to the specified key.
294 1 : func (f *findT) searchLogs(stdout io.Writer, searchKey []byte, refs []findRef) []findRef {
295 1 : cmp := f.opts.Comparer.Compare
296 1 : for _, fileNum := range f.logs {
297 1 : _ = func() (err error) {
298 1 : path := f.files[fileNum]
299 1 : lf, err := f.opts.FS.Open(path)
300 1 : if err != nil {
301 0 : fmt.Fprintf(stdout, "%s\n", err)
302 0 : return
303 0 : }
304 1 : defer lf.Close()
305 1 :
306 1 : if f.verbose {
307 1 : fmt.Fprintf(stdout, "%s", path)
308 1 : defer fmt.Fprintf(stdout, "\n")
309 1 : }
310 1 : defer func() {
311 1 : switch err {
312 0 : case record.ErrZeroedChunk:
313 0 : if f.verbose {
314 0 : fmt.Fprintf(stdout, ": EOF [%s] (may be due to WAL preallocation)", err)
315 0 : }
316 0 : case record.ErrInvalidChunk:
317 0 : if f.verbose {
318 0 : fmt.Fprintf(stdout, ": EOF [%s] (may be due to WAL recycling)", err)
319 0 : }
320 1 : default:
321 1 : if err != io.EOF {
322 0 : if f.verbose {
323 0 : fmt.Fprintf(stdout, ": %s", err)
324 0 : } else {
325 0 : fmt.Fprintf(stdout, "%s: %s\n", path, err)
326 0 : }
327 : }
328 : }
329 : }()
330 :
331 1 : var b pebble.Batch
332 1 : var buf bytes.Buffer
333 1 : rr := record.NewReader(lf, fileNum)
334 1 : for {
335 1 : r, err := rr.Next()
336 1 : if err == nil {
337 1 : buf.Reset()
338 1 : _, err = io.Copy(&buf, r)
339 1 : }
340 1 : if err != nil {
341 1 : return err
342 1 : }
343 :
344 1 : b = pebble.Batch{}
345 1 : if err := b.SetRepr(buf.Bytes()); err != nil {
346 0 : fmt.Fprintf(stdout, "%s: corrupt log file: %v", path, err)
347 0 : continue
348 : }
349 1 : seqNum := b.SeqNum()
350 1 : for r := b.Reader(); ; seqNum++ {
351 1 : kind, ukey, value, ok, err := r.Next()
352 1 : if !ok {
353 1 : if err != nil {
354 0 : fmt.Fprintf(stdout, "%s: corrupt log file: %v", path, err)
355 0 : break
356 : }
357 1 : break
358 : }
359 1 : ikey := base.MakeInternalKey(ukey, seqNum, kind)
360 1 : switch kind {
361 : case base.InternalKeyKindDelete,
362 : base.InternalKeyKindDeleteSized,
363 : base.InternalKeyKindSet,
364 : base.InternalKeyKindMerge,
365 : base.InternalKeyKindSingleDelete,
366 1 : base.InternalKeyKindSetWithDelete:
367 1 : if cmp(searchKey, ikey.UserKey) != 0 {
368 1 : continue
369 : }
370 1 : case base.InternalKeyKindRangeDelete:
371 1 : // Output tombstones that contain or end with the search key.
372 1 : t := rangedel.Decode(ikey, value, nil)
373 1 : if !t.Contains(cmp, searchKey) && cmp(t.End, searchKey) != 0 {
374 1 : continue
375 : }
376 0 : default:
377 0 : continue
378 : }
379 :
380 1 : refs = append(refs, findRef{
381 1 : key: ikey.Clone(),
382 1 : value: append([]byte(nil), value...),
383 1 : fileNum: fileNum.FileNum(),
384 1 : })
385 : }
386 : }
387 : }()
388 : }
389 1 : return refs
390 : }
391 :
// Search the tables for references to the specified key.
func (f *findT) searchTables(stdout io.Writer, searchKey []byte, refs []findRef) []findRef {
	cache := pebble.NewCache(128 << 20 /* 128 MB */)
	defer cache.Unref()

	f.tableRefs = make(map[base.FileNum]bool)
	for _, fileNum := range f.tables {
		// Each table is processed in a closure so the deferred cleanup
		// (reader/iterator Close, error reporting) runs per-table; the
		// returned error is consumed by the deferred reporter below.
		_ = func() (err error) {
			path := f.files[fileNum.DiskFileNum()]
			tf, err := f.opts.FS.Open(path)
			if err != nil {
				fmt.Fprintf(stdout, "%s\n", err)
				return
			}

			m := f.tableMeta[fileNum]
			if f.verbose {
				fmt.Fprintf(stdout, "%s", path)
				if m != nil && m.SmallestSeqNum == m.LargestSeqNum {
					fmt.Fprintf(stdout, ": global seqnum: %d", m.LargestSeqNum)
				}
				defer fmt.Fprintf(stdout, "\n")
			}
			// Report any error raised while processing this table.
			defer func() {
				switch {
				case err != nil:
					if f.verbose {
						fmt.Fprintf(stdout, ": %v", err)
					} else {
						fmt.Fprintf(stdout, "%s: %v\n", path, err)
					}
				}
			}()

			opts := sstable.ReaderOptions{
				Cache:    cache,
				Comparer: f.opts.Comparer,
				Filters:  f.opts.Filters,
			}
			readable, err := sstable.NewSimpleReadable(tf)
			if err != nil {
				return err
			}
			r, err := sstable.NewReader(readable, opts, f.comparers, f.mergers,
				private.SSTableRawTombstonesOpt.(sstable.ReaderOption))
			if err != nil {
				// Collect decode failures for the caller to print after the
				// search completes.
				f.errors = append(f.errors, fmt.Sprintf("Unable to decode sstable %s, %s", f.files[fileNum.DiskFileNum()], err.Error()))
				// Ensure the error only gets printed once.
				err = nil
				return
			}
			defer r.Close()

			// Equal smallest/largest seqnum indicates an ingested table (per
			// the check also used in tableProvenance); apply its global
			// seqnum so iterated keys carry the right sequence number.
			if m != nil && m.SmallestSeqNum == m.LargestSeqNum {
				r.Properties.GlobalSeqNum = m.LargestSeqNum
			}

			iter, err := r.NewIter(nil, nil)
			if err != nil {
				return err
			}
			defer iter.Close()
			key, value := iter.SeekGE(searchKey, base.SeekGEFlagsNone)

			// We configured sstable.Reader to return raw tombstones which requires a
			// bit more work here to put them in a form that can be iterated in
			// parallel with the point records.
			rangeDelIter, err := func() (keyspan.FragmentIterator, error) {
				iter, err := r.NewRawRangeDelIter()
				if err != nil {
					return nil, err
				}
				if iter == nil {
					// No range tombstones: use an empty fragment iterator.
					return keyspan.NewIter(r.Compare, nil), nil
				}
				defer iter.Close()

				// Keep only the tombstones that contain the search key.
				var tombstones []keyspan.Span
				for t := iter.First(); t != nil; t = iter.Next() {
					if !t.Contains(r.Compare, searchKey) {
						continue
					}
					tombstones = append(tombstones, t.ShallowClone())
				}

				slices.SortFunc(tombstones, func(a, b keyspan.Span) int {
					return r.Compare(a.Start, b.Start)
				})
				return keyspan.NewIter(r.Compare, tombstones), nil
			}()
			if err != nil {
				return err
			}

			defer rangeDelIter.Close()
			rangeDel := rangeDelIter.First()

			// Merge-iterate point keys and the filtered tombstones, emitting
			// whichever comes first by start key.
			foundRef := false
			for key != nil || rangeDel != nil {
				if key != nil &&
					(rangeDel == nil || r.Compare(key.UserKey, rangeDel.Start) < 0) {
					// Point keys past the search key end the point stream
					// (we seeked to searchKey, so any mismatch is beyond it).
					if r.Compare(searchKey, key.UserKey) != 0 {
						key, value = nil, base.LazyValue{}
						continue
					}
					v, _, err := value.Value(nil)
					if err != nil {
						return err
					}
					refs = append(refs, findRef{
						key:     key.Clone(),
						value:   append([]byte(nil), v...),
						fileNum: fileNum,
					})
					key, value = iter.Next()
				} else {
					// Use rangedel.Encode to add a reference for each key
					// within the span.
					err := rangedel.Encode(rangeDel, func(k base.InternalKey, v []byte) error {
						refs = append(refs, findRef{
							key:     k.Clone(),
							value:   append([]byte(nil), v...),
							fileNum: fileNum,
						})
						return nil
					})
					if err != nil {
						return err
					}
					rangeDel = rangeDelIter.Next()
				}
				foundRef = true
			}

			// Remember tables containing the key; tableProvenance uses this
			// to decide which compaction inputs to display.
			if foundRef {
				f.tableRefs[fileNum] = true
			}
			return nil
		}()
	}
	return refs
}
534 :
// Determine the provenance of the specified table. We search the version edits
// for the first edit which created the table, and then analyze the edit to
// determine if it was a compaction, flush, or ingestion. Returns an empty
// string if the provenance of a table cannot be determined.
func (f *findT) tableProvenance(fileNum base.FileNum) string {
	editRefs := f.editRefs[fileNum]
	for len(editRefs) > 0 {
		ve := f.edits[editRefs[0]]
		editRefs = editRefs[1:]
		for _, nf := range ve.NewFiles {
			if fileNum != nf.Meta.FileNum {
				continue
			}

			var buf bytes.Buffer
			switch {
			case len(ve.DeletedFiles) > 0:
				// A version edit with deleted files is a compaction. The deleted
				// files are the inputs to the compaction. We're going to
				// reconstruct the input files and display those inputs that
				// contain the search key (i.e. are list in refs) and use an
				// ellipsis to indicate when there were other inputs that have
				// been elided.
				var sourceLevels []int
				levels := make(map[int][]base.FileNum)
				for df := range ve.DeletedFiles {
					files := levels[df.Level]
					if len(files) == 0 {
						// First file seen for this level; remember the level.
						sourceLevels = append(sourceLevels, df.Level)
					}
					levels[df.Level] = append(files, df.FileNum)
				}

				sort.Ints(sourceLevels)
				// Ensure the output level appears even when no input file
				// was deleted from it.
				if sourceLevels[len(sourceLevels)-1] != nf.Level {
					sourceLevels = append(sourceLevels, nf.Level)
				}

				// sep carries the separator state across the nested loops:
				// " " between "compacted" and the first level, "" at the
				// start of each bracket, " + " between levels.
				sep := " "
				fmt.Fprintf(&buf, "compacted")
				for _, level := range sourceLevels {
					files := levels[level]
					slices.Sort(files)

					fmt.Fprintf(&buf, "%sL%d [", sep, level)
					sep = ""
					elided := false
					for _, fileNum := range files {
						// Only show inputs that themselves contained the
						// search key; mark everything else with "...".
						if f.tableRefs[fileNum] {
							fmt.Fprintf(&buf, "%s%s", sep, fileNum)
							sep = " "
						} else {
							elided = true
						}
					}
					if elided {
						fmt.Fprintf(&buf, "%s...", sep)
					}
					fmt.Fprintf(&buf, "]")
					sep = " + "
				}

			case ve.MinUnflushedLogNum != 0:
				// A version edit with a min-unflushed log indicates a flush
				// operation.
				fmt.Fprintf(&buf, "flushed to L%d", nf.Level)

			case nf.Meta.SmallestSeqNum == nf.Meta.LargestSeqNum:
				// If the smallest and largest seqnum are the same, the file was
				// ingested. Note that this can also occur for a flushed sstable
				// that contains only a single key, though that would have
				// already been captured above.
				fmt.Fprintf(&buf, "ingested to L%d", nf.Level)

			default:
				// The provenance of the table is unclear. This is usually due to
				// the MANIFEST rolling over and taking a snapshot of the LSM
				// state.
				fmt.Fprintf(&buf, "added to L%d", nf.Level)
			}

			// After a table is created, it can be moved to a different level via a
			// move compaction. This is indicated by a version edit that deletes the
			// table from one level and adds the same table to a different
			// level. Loop over the remaining version edits for the table looking for
			// such moves.
			for len(editRefs) > 0 {
				ve := f.edits[editRefs[0]]
				editRefs = editRefs[1:]
				for _, nf := range ve.NewFiles {
					if fileNum == nf.Meta.FileNum {
						for df := range ve.DeletedFiles {
							if fileNum == df.FileNum {
								fmt.Fprintf(&buf, ", moved to L%d", nf.Level)
								break
							}
						}
						break
					}
				}
			}

			return buf.String()
		}
	}
	return ""
}
|