LCOV - code coverage report
Current view: top level - pebble - checkpoint.go (source / functions) Hit Total Coverage
Test: 2024-08-03 08:16Z cda4471a - tests + meta.lcov Lines: 242 303 79.9 %
Date: 2024-08-03 08:17:19 Functions: 0 0 -

          Line data    Source code
       1             : // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
       2             : // of this source code is governed by a BSD-style license that can be found in
       3             : // the LICENSE file.
       4             : 
       5             : package pebble
       6             : 
       7             : import (
       8             :         "io"
       9             :         "os"
      10             : 
      11             :         "github.com/cockroachdb/errors"
      12             :         "github.com/cockroachdb/errors/oserror"
      13             :         "github.com/cockroachdb/pebble/internal/base"
      14             :         "github.com/cockroachdb/pebble/record"
      15             :         "github.com/cockroachdb/pebble/vfs"
      16             :         "github.com/cockroachdb/pebble/vfs/atomicfs"
      17             :         "github.com/cockroachdb/pebble/wal"
      18             : )
      19             : 
      20             : // checkpointOptions hold the optional parameters to construct checkpoint
      21             : // snapshots.
      22             : type checkpointOptions struct {
      23             :         // flushWAL set to true will force a flush and sync of the WAL prior to
      24             :         // checkpointing.
      25             :         flushWAL bool
      26             : 
      27             :         // If set, any SSTs that don't overlap with these spans are excluded from a checkpoint.
      28             :         restrictToSpans []CheckpointSpan
      29             : }
      30             : 
      31             : // CheckpointOption set optional parameters used by `DB.Checkpoint`.
      32             : type CheckpointOption func(*checkpointOptions)
      33             : 
      34             : // WithFlushedWAL enables flushing and syncing the WAL prior to constructing a
      35             : // checkpoint. This guarantees that any writes committed before calling
      36             : // DB.Checkpoint will be part of that checkpoint.
      37             : //
      38             : // Note that this setting can only be useful in cases when some writes are
      39             : // performed with Sync = false. Otherwise, the guarantee will already be met.
      40             : //
      41             : // Passing this option is functionally equivalent to calling
      42             : // DB.LogData(nil, Sync) right before DB.Checkpoint.
      43           1 : func WithFlushedWAL() CheckpointOption {
      44           1 :         return func(opt *checkpointOptions) {
      45           1 :                 opt.flushWAL = true
      46           1 :         }
      47             : }
      48             : 
      49             : // WithRestrictToSpans specifies spans of interest for the checkpoint. Any SSTs
      50             : // that don't overlap with any of these spans are excluded from the checkpoint.
      51             : //
      52             : // Note that the checkpoint can still surface keys outside of these spans (from
      53             : // the WAL and from SSTs that partially overlap with these spans). Moreover,
      54             : // these surface keys aren't necessarily "valid" in that they could have been
      55             : // modified but the SST containing the modification is excluded.
      56           2 : func WithRestrictToSpans(spans []CheckpointSpan) CheckpointOption {
      57           2 :         return func(opt *checkpointOptions) {
      58           2 :                 opt.restrictToSpans = spans
      59           2 :         }
      60             : }
      61             : 
      62             : // CheckpointSpan is a key range [Start, End) (inclusive on Start, exclusive on
      63             : // End) of interest for a checkpoint.
      64             : type CheckpointSpan struct {
      65             :         Start []byte
      66             :         End   []byte
      67             : }
      68             : 
      69             : // excludeFromCheckpoint returns true if an SST file should be excluded from the
      70             : // checkpoint because it does not overlap with the spans of interest
      71             : // (opt.restrictToSpans).
      72           2 : func excludeFromCheckpoint(f *fileMetadata, opt *checkpointOptions, cmp Compare) bool {
      73           2 :         if len(opt.restrictToSpans) == 0 {
      74           2 :                 // Option not set; don't exclude anything.
      75           2 :                 return false
      76           2 :         }
      77           2 :         for _, s := range opt.restrictToSpans {
      78           2 :                 spanBounds := base.UserKeyBoundsEndExclusive(s.Start, s.End)
      79           2 :                 if f.Overlaps(cmp, &spanBounds) {
      80           2 :                         return false
      81           2 :                 }
      82             :         }
      83             :         // None of the restrictToSpans overlapped; we can exclude this file.
      84           2 :         return true
      85             : }
      86             : 
      87             : // mkdirAllAndSyncParents creates destDir and any of its missing parents.
      88             : // Those missing parents, as well as the closest existing ancestor, are synced.
      89             : // Returns a handle to the directory created at destDir.
      90           2 : func mkdirAllAndSyncParents(fs vfs.FS, destDir string) (vfs.File, error) {
      91           2 :         // Collect paths for all directories between destDir (excluded) and its
      92           2 :         // closest existing ancestor (included).
      93           2 :         var parentPaths []string
      94           2 :         foundExistingAncestor := false
      95           2 :         for parentPath := fs.PathDir(destDir); parentPath != "."; parentPath = fs.PathDir(parentPath) {
      96           2 :                 parentPaths = append(parentPaths, parentPath)
      97           2 :                 _, err := fs.Stat(parentPath)
      98           2 :                 if err == nil {
      99           2 :                         // Exit loop at the closest existing ancestor.
     100           2 :                         foundExistingAncestor = true
     101           2 :                         break
     102             :                 }
     103           2 :                 if !oserror.IsNotExist(err) {
     104           0 :                         return nil, err
     105           0 :                 }
     106             :         }
     107             :         // Handle empty filesystem edge case.
     108           2 :         if !foundExistingAncestor {
     109           2 :                 parentPaths = append(parentPaths, "")
     110           2 :         }
     111             :         // Create destDir and any of its missing parents.
     112           2 :         if err := fs.MkdirAll(destDir, 0755); err != nil {
     113           1 :                 return nil, err
     114           1 :         }
     115             :         // Sync all the parent directories up to the closest existing ancestor,
     116             :         // included.
     117           2 :         for _, parentPath := range parentPaths {
     118           2 :                 parentDir, err := fs.OpenDir(parentPath)
     119           2 :                 if err != nil {
     120           1 :                         return nil, err
     121           1 :                 }
     122           2 :                 err = parentDir.Sync()
     123           2 :                 if err != nil {
     124           1 :                         _ = parentDir.Close()
     125           1 :                         return nil, err
     126           1 :                 }
     127           2 :                 err = parentDir.Close()
     128           2 :                 if err != nil {
     129           0 :                         return nil, err
     130           0 :                 }
     131             :         }
     132           2 :         return fs.OpenDir(destDir)
     133             : }
     134             : 
// Checkpoint constructs a snapshot of the DB instance in the specified
// directory. The WAL, MANIFEST, OPTIONS, and sstables will be copied into the
// snapshot. Hard links will be used when possible. Beware of the significant
// space overhead for a checkpoint if hard links are disabled. Also beware that
// even if hard links are used, the space overhead for the checkpoint will
// increase over time as the DB performs compactions.
//
// Note that shared files in a checkpoint could get deleted if the DB is
// restarted after a checkpoint operation, as the reference for the checkpoint
// is only maintained in memory. This is okay as long as users of Checkpoint
// crash shortly afterwards with a "poison file" preventing further restarts.
func (d *DB) Checkpoint(
	destDir string, opts ...CheckpointOption,
) (
	ckErr error, /* used in deferred cleanup */
) {
	// Apply all caller-supplied options to a fresh options struct.
	opt := &checkpointOptions{}
	for _, fn := range opts {
		fn(opt)
	}

	// Fail fast if destDir already exists (err == nil), or if Stat failed for
	// any reason other than the directory not existing.
	if _, err := d.opts.FS.Stat(destDir); !oserror.IsNotExist(err) {
		if err == nil {
			return &os.PathError{
				Op:   "checkpoint",
				Path: destDir,
				Err:  oserror.ErrExist,
			}
		}
		return err
	}

	if opt.flushWAL && !d.opts.DisableWAL {
		// Write an empty log-data record to flush and sync the WAL.
		if err := d.LogData(nil /* data */, Sync); err != nil {
			return err
		}
	}

	// Disable file deletions so that files referenced by the snapshot of the
	// version we capture below cannot be removed while we copy/link them. The
	// deferred func re-enables deletions when the checkpoint finishes (on both
	// success and error paths).
	d.mu.Lock()
	d.disableFileDeletions()
	defer func() {
		d.mu.Lock()
		defer d.mu.Unlock()
		d.enableFileDeletions()
	}()

	// TODO(peter): RocksDB provides the option to roll the manifest if the
	// MANIFEST size is too large. Should we do this too?

	// Lock the manifest before getting the current version. We need the
	// length of the manifest that we read to match the current version that
	// we read, otherwise we might copy a versionEdit not reflected in the
	// sstables we copy/link.
	d.mu.versions.logLock()
	// Get the unflushed log files, the current version, and the current manifest
	// file number.
	memQueue := d.mu.mem.queue
	current := d.mu.versions.currentVersion()
	formatVers := d.FormatMajorVersion()
	manifestFileNum := d.mu.versions.manifestFileNum
	manifestSize := d.mu.versions.manifest.Size()
	optionsFileNum := d.optionsFileNum

	// Snapshot the set of disk files currently backing virtual sstables; used
	// below to compute which backings the checkpoint does NOT need.
	virtualBackingFiles := make(map[base.DiskFileNum]struct{})
	d.mu.versions.virtualBackings.ForEach(func(backing *fileBacking) {
		virtualBackingFiles[backing.DiskFileNum] = struct{}{}
	})

	// Collect the WAL numbers of all memtables still holding unflushed data
	// (logNum == 0 entries carry no WAL and are skipped).
	queuedLogNums := make([]wal.NumWAL, 0, len(memQueue))
	for i := range memQueue {
		if logNum := memQueue[i].logNum; logNum != 0 {
			queuedLogNums = append(queuedLogNums, wal.NumWAL(logNum))
		}
	}
	// Release the manifest and DB.mu so we don't block other operations on
	// the database.
	d.mu.versions.logUnlock()
	d.mu.Unlock()

	// NOTE(review): List() is invoked after d.mu has been released —
	// presumably the WAL manager is internally synchronized; confirm.
	allLogicalLogs, err := d.mu.log.manager.List()
	if err != nil {
		return err
	}

	// Wrap the normal filesystem with one which wraps newly created files with
	// vfs.NewSyncingFile.
	fs := vfs.NewSyncingFS(d.opts.FS, vfs.SyncingFileOptions{
		NoSyncOnClose: d.opts.NoSyncOnClose,
		BytesPerSync:  d.opts.BytesPerSync,
	})

	// Create the dir and its parents (if necessary), and sync them.
	// The deferred func closes the directory handle if still open and, if the
	// checkpoint failed (ckErr != nil), best-effort removes the partially
	// built checkpoint directory.
	var dir vfs.File
	defer func() {
		if dir != nil {
			_ = dir.Close()
		}
		if ckErr != nil {
			// Attempt to cleanup on error.
			_ = fs.RemoveAll(destDir)
		}
	}()
	dir, ckErr = mkdirAllAndSyncParents(fs, destDir)
	if ckErr != nil {
		return ckErr
	}

	{
		// Link or copy the OPTIONS.
		srcPath := base.MakeFilepath(fs, d.dirname, fileTypeOptions, optionsFileNum)
		destPath := fs.PathJoin(destDir, fs.PathBase(srcPath))
		ckErr = vfs.LinkOrCopy(fs, srcPath, destPath)
		if ckErr != nil {
			return ckErr
		}
	}

	{
		// Set the format major version in the destination directory.
		var versionMarker *atomicfs.Marker
		versionMarker, _, ckErr = atomicfs.LocateMarker(fs, destDir, formatVersionMarkerName)
		if ckErr != nil {
			return ckErr
		}

		// We use the marker to encode the active format version in the
		// marker filename. Unlike other uses of the atomic marker,
		// there is no file with the filename `formatVers.String()` on
		// the filesystem.
		ckErr = versionMarker.Move(formatVers.String())
		if ckErr != nil {
			return ckErr
		}
		ckErr = versionMarker.Close()
		if ckErr != nil {
			return ckErr
		}
	}

	// Files excluded via WithRestrictToSpans; recorded so a deletion
	// versionEdit can be appended to the checkpoint's MANIFEST copy.
	var excludedFiles map[deletedFileEntry]*fileMetadata
	var remoteFiles []base.DiskFileNum
	// Set of FileBacking.DiskFileNum which will be required by virtual sstables
	// in the checkpoint.
	requiredVirtualBackingFiles := make(map[base.DiskFileNum]struct{})
	// Link or copy the sstables.
	for l := range current.Levels {
		iter := current.Levels[l].Iter()
		for f := iter.First(); f != nil; f = iter.Next() {
			if excludeFromCheckpoint(f, opt, d.cmp) {
				if excludedFiles == nil {
					excludedFiles = make(map[deletedFileEntry]*fileMetadata)
				}
				excludedFiles[deletedFileEntry{
					Level:   l,
					FileNum: f.FileNum,
				}] = f
				continue
			}

			fileBacking := f.FileBacking
			if f.Virtual {
				// Several virtual sstables may share one backing file; copy
				// each backing at most once.
				if _, ok := requiredVirtualBackingFiles[fileBacking.DiskFileNum]; ok {
					continue
				}
				requiredVirtualBackingFiles[fileBacking.DiskFileNum] = struct{}{}
			}
			meta, err := d.objProvider.Lookup(fileTypeTable, fileBacking.DiskFileNum)
			if err != nil {
				ckErr = err
				return ckErr
			}
			if meta.IsRemote() {
				// We don't copy remote files. This is desirable as checkpointing is
				// supposed to be a fast operation, and references to remote files can
				// always be resolved by any checkpoint readers by reading the object
				// catalog. We don't add this file to excludedFiles either, as that'd
				// cause it to be deleted in the second manifest entry which is also
				// inaccurate.
				remoteFiles = append(remoteFiles, meta.DiskFileNum)
				continue
			}

			srcPath := base.MakeFilepath(fs, d.dirname, fileTypeTable, fileBacking.DiskFileNum)
			destPath := fs.PathJoin(destDir, fs.PathBase(srcPath))
			ckErr = vfs.LinkOrCopy(fs, srcPath, destPath)
			if ckErr != nil {
				return ckErr
			}
		}
	}

	// Compute the backings present in the live DB but unreferenced by any
	// virtual sstable included in the checkpoint.
	var removeBackingTables []base.DiskFileNum
	for diskFileNum := range virtualBackingFiles {
		if _, ok := requiredVirtualBackingFiles[diskFileNum]; !ok {
			// The backing sstable associated with fileNum is no longer
			// required.
			removeBackingTables = append(removeBackingTables, diskFileNum)
		}
	}

	ckErr = d.writeCheckpointManifest(
		fs, formatVers, destDir, dir, manifestFileNum, manifestSize,
		excludedFiles, removeBackingTables,
	)
	if ckErr != nil {
		return ckErr
	}
	if len(remoteFiles) > 0 {
		// Record the remote-file references so checkpoint readers can resolve
		// them from the object catalog.
		ckErr = d.objProvider.CheckpointState(fs, destDir, fileTypeTable, remoteFiles)
		if ckErr != nil {
			return ckErr
		}
	}

	// Copy the WAL files. We copy rather than link because WAL file recycling
	// will cause the WAL files to be reused which would invalidate the
	// checkpoint.
	for _, logNum := range queuedLogNums {
		log, ok := allLogicalLogs.Get(logNum)
		if !ok {
			return errors.Newf("log %s not found", logNum)
		}
		for i := 0; i < log.NumSegments(); i++ {
			srcFS, srcPath := log.SegmentLocation(i)
			destPath := fs.PathJoin(destDir, srcFS.PathBase(srcPath))
			ckErr = vfs.CopyAcrossFS(srcFS, srcPath, fs, destPath)
			if ckErr != nil {
				return ckErr
			}
		}
	}

	// Sync and close the checkpoint directory.
	ckErr = dir.Sync()
	if ckErr != nil {
		return ckErr
	}
	ckErr = dir.Close()
	// Setting dir to nil tells the deferred cleanup above not to Close again.
	dir = nil
	return ckErr
}
     378             : 
// writeCheckpointManifest copies the first manifestSize bytes of the current
// MANIFEST (record by record) into destDirPath, optionally appends a
// versionEdit deleting excludedFiles / removeBackingTables, and installs the
// manifest marker pointing at the copy.
//
// NOTE(review): the formatVers and destDir parameters are not referenced in
// this body — presumably kept for signature symmetry with callers; confirm.
func (d *DB) writeCheckpointManifest(
	fs vfs.FS,
	formatVers FormatMajorVersion,
	destDirPath string,
	destDir vfs.File,
	manifestFileNum base.DiskFileNum,
	manifestSize int64,
	excludedFiles map[deletedFileEntry]*fileMetadata,
	removeBackingTables []base.DiskFileNum,
) error {
	// Copy the MANIFEST, and create a pointer to it. We copy rather
	// than link because additional version edits added to the
	// MANIFEST after we took our snapshot of the sstables will
	// reference sstables that aren't in our checkpoint. For a
	// similar reason, we need to limit how much of the MANIFEST we
	// copy.
	// If some files are excluded from the checkpoint, also append a block that
	// records those files as deleted.
	// The copy runs inside an immediately-invoked closure so the deferred
	// src/dst Closes fire before the marker is installed below.
	if err := func() error {
		srcPath := base.MakeFilepath(fs, d.dirname, fileTypeManifest, manifestFileNum)
		destPath := fs.PathJoin(destDirPath, fs.PathBase(srcPath))
		src, err := fs.Open(srcPath, vfs.SequentialReadsOption)
		if err != nil {
			return err
		}
		defer src.Close()

		dst, err := fs.Create(destPath, vfs.WriteCategoryUnspecified)
		if err != nil {
			return err
		}
		defer dst.Close()

		// Copy all existing records. We need to copy at the record level in case we
		// need to append another record with the excluded files (we cannot simply
		// append a record after a raw data copy; see
		// https://github.com/cockroachdb/cockroach/issues/100935).
		// The LimitedReader caps the copy at manifestSize so version edits
		// appended after the snapshot was taken are not included.
		r := record.NewReader(&io.LimitedReader{R: src, N: manifestSize}, manifestFileNum)
		w := record.NewWriter(dst)
		for {
			rr, err := r.Next()
			if err != nil {
				// io.EOF marks the end of the (size-limited) record stream.
				if err == io.EOF {
					break
				}
				return err
			}

			rw, err := w.Next()
			if err != nil {
				return err
			}
			if _, err := io.Copy(rw, rr); err != nil {
				return err
			}
		}

		if len(excludedFiles) > 0 {
			// Write out an additional VersionEdit that deletes the excluded SST files.
			ve := versionEdit{
				DeletedFiles:         excludedFiles,
				RemovedBackingTables: removeBackingTables,
			}

			rw, err := w.Next()
			if err != nil {
				return err
			}
			if err := ve.Encode(rw); err != nil {
				return err
			}
		}
		if err := w.Close(); err != nil {
			return err
		}
		// Sync the manifest copy before the marker makes it authoritative.
		return dst.Sync()
	}(); err != nil {
		return err
	}

	// Install the marker that points readers of the checkpoint directory at
	// the MANIFEST copy written above.
	var manifestMarker *atomicfs.Marker
	manifestMarker, _, err := atomicfs.LocateMarker(fs, destDirPath, manifestMarkerName)
	if err != nil {
		return err
	}
	if err := manifestMarker.Move(base.MakeFilename(fileTypeManifest, manifestFileNum)); err != nil {
		return err
	}
	return manifestMarker.Close()
}

Generated by: LCOV version 1.14