LCOV - code coverage report
Current view: top level - pebble - checkpoint.go (source / functions) Hit Total Coverage
Test: 2024-03-16 08:17Z 0eccafb7 - tests only.lcov Lines: 226 282 80.1 %
Date: 2024-03-16 08:18:04 Functions: 0 0 -

          Line data    Source code
       1             : // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
       2             : // of this source code is governed by a BSD-style license that can be found in
       3             : // the LICENSE file.
       4             : 
       5             : package pebble
       6             : 
       7             : import (
       8             :         "io"
       9             :         "os"
      10             : 
      11             :         "github.com/cockroachdb/errors"
      12             :         "github.com/cockroachdb/errors/oserror"
      13             :         "github.com/cockroachdb/pebble/internal/base"
      14             :         "github.com/cockroachdb/pebble/record"
      15             :         "github.com/cockroachdb/pebble/vfs"
      16             :         "github.com/cockroachdb/pebble/vfs/atomicfs"
      17             :         "github.com/cockroachdb/pebble/wal"
      18             : )
      19             : 
      20             : // checkpointOptions hold the optional parameters to construct checkpoint
      21             : // snapshots.
      22             : type checkpointOptions struct {
      23             :         // flushWAL set to true will force a flush and sync of the WAL prior to
      24             :         // checkpointing.
      25             :         flushWAL bool
      26             : 
      27             :         // If set, any SSTs that don't overlap with these spans are excluded from a checkpoint.
      28             :         restrictToSpans []CheckpointSpan
      29             : }
      30             : 
      31             : // CheckpointOption set optional parameters used by `DB.Checkpoint`.
      32             : type CheckpointOption func(*checkpointOptions)
      33             : 
      34             : // WithFlushedWAL enables flushing and syncing the WAL prior to constructing a
      35             : // checkpoint. This guarantees that any writes committed before calling
      36             : // DB.Checkpoint will be part of that checkpoint.
      37             : //
      38             : // Note that this setting can only be useful in cases when some writes are
      39             : // performed with Sync = false. Otherwise, the guarantee will already be met.
      40             : //
      41             : // Passing this option is functionally equivalent to calling
      42             : // DB.LogData(nil, Sync) right before DB.Checkpoint.
      43           1 : func WithFlushedWAL() CheckpointOption {
      44           1 :         return func(opt *checkpointOptions) {
      45           1 :                 opt.flushWAL = true
      46           1 :         }
      47             : }
      48             : 
      49             : // WithRestrictToSpans specifies spans of interest for the checkpoint. Any SSTs
      50             : // that don't overlap with any of these spans are excluded from the checkpoint.
      51             : //
      52             : // Note that the checkpoint can still surface keys outside of these spans (from
      53             : // the WAL and from SSTs that partially overlap with these spans). Moreover,
      54             : // these surface keys aren't necessarily "valid" in that they could have been
      55             : // modified but the SST containing the modification is excluded.
      56           1 : func WithRestrictToSpans(spans []CheckpointSpan) CheckpointOption {
      57           1 :         return func(opt *checkpointOptions) {
      58           1 :                 opt.restrictToSpans = spans
      59           1 :         }
      60             : }
      61             : 
      62             : // CheckpointSpan is a key range [Start, End) (inclusive on Start, exclusive on
      63             : // End) of interest for a checkpoint.
      64             : type CheckpointSpan struct {
      65             :         Start []byte
      66             :         End   []byte
      67             : }
      68             : 
      69             : // excludeFromCheckpoint returns true if an SST file should be excluded from the
      70             : // checkpoint because it does not overlap with the spans of interest
      71             : // (opt.restrictToSpans).
      72           1 : func excludeFromCheckpoint(f *fileMetadata, opt *checkpointOptions, cmp Compare) bool {
      73           1 :         if len(opt.restrictToSpans) == 0 {
      74           1 :                 // Option not set; don't exclude anything.
      75           1 :                 return false
      76           1 :         }
      77           1 :         for _, s := range opt.restrictToSpans {
      78           1 :                 if f.Overlaps(cmp, s.Start, s.End, true /* exclusiveEnd */) {
      79           1 :                         return false
      80           1 :                 }
      81             :         }
      82             :         // None of the restrictToSpans overlapped; we can exclude this file.
      83           1 :         return true
      84             : }
      85             : 
      86             : // mkdirAllAndSyncParents creates destDir and any of its missing parents.
      87             : // Those missing parents, as well as the closest existing ancestor, are synced.
      88             : // Returns a handle to the directory created at destDir.
      89           1 : func mkdirAllAndSyncParents(fs vfs.FS, destDir string) (vfs.File, error) {
      90           1 :         // Collect paths for all directories between destDir (excluded) and its
      91           1 :         // closest existing ancestor (included).
      92           1 :         var parentPaths []string
      93           1 :         foundExistingAncestor := false
      94           1 :         for parentPath := fs.PathDir(destDir); parentPath != "."; parentPath = fs.PathDir(parentPath) {
      95           1 :                 parentPaths = append(parentPaths, parentPath)
      96           1 :                 _, err := fs.Stat(parentPath)
      97           1 :                 if err == nil {
      98           1 :                         // Exit loop at the closest existing ancestor.
      99           1 :                         foundExistingAncestor = true
     100           1 :                         break
     101             :                 }
     102           1 :                 if !oserror.IsNotExist(err) {
     103           0 :                         return nil, err
     104           0 :                 }
     105             :         }
     106             :         // Handle empty filesystem edge case.
     107           1 :         if !foundExistingAncestor {
     108           1 :                 parentPaths = append(parentPaths, "")
     109           1 :         }
     110             :         // Create destDir and any of its missing parents.
     111           1 :         if err := fs.MkdirAll(destDir, 0755); err != nil {
     112           1 :                 return nil, err
     113           1 :         }
     114             :         // Sync all the parent directories up to the closest existing ancestor,
     115             :         // included.
     116           1 :         for _, parentPath := range parentPaths {
     117           1 :                 parentDir, err := fs.OpenDir(parentPath)
     118           1 :                 if err != nil {
     119           1 :                         return nil, err
     120           1 :                 }
     121           1 :                 err = parentDir.Sync()
     122           1 :                 if err != nil {
     123           1 :                         _ = parentDir.Close()
     124           1 :                         return nil, err
     125           1 :                 }
     126           1 :                 err = parentDir.Close()
     127           1 :                 if err != nil {
     128           0 :                         return nil, err
     129           0 :                 }
     130             :         }
     131           1 :         return fs.OpenDir(destDir)
     132             : }
     133             : 
     134             : // Checkpoint constructs a snapshot of the DB instance in the specified
     135             : // directory. The WAL, MANIFEST, OPTIONS, and sstables will be copied into the
     136             : // snapshot. Hard links will be used when possible. Beware of the significant
     137             : // space overhead for a checkpoint if hard links are disabled. Also beware that
     138             : // even if hard links are used, the space overhead for the checkpoint will
     139             : // increase over time as the DB performs compactions.
     140             : func (d *DB) Checkpoint(
     141             :         destDir string, opts ...CheckpointOption,
     142             : ) (
     143             :         ckErr error, /* used in deferred cleanup */
     144           1 : ) {
     145           1 :         opt := &checkpointOptions{}
     146           1 :         for _, fn := range opts {
     147           1 :                 fn(opt)
     148           1 :         }
     149             : 
     150           1 :         if _, err := d.opts.FS.Stat(destDir); !oserror.IsNotExist(err) {
     151           1 :                 if err == nil {
     152           1 :                         return &os.PathError{
     153           1 :                                 Op:   "checkpoint",
     154           1 :                                 Path: destDir,
     155           1 :                                 Err:  oserror.ErrExist,
     156           1 :                         }
     157           1 :                 }
     158           0 :                 return err
     159             :         }
     160             : 
     161           1 :         if opt.flushWAL && !d.opts.DisableWAL {
     162           1 :                 // Write an empty log-data record to flush and sync the WAL.
     163           1 :                 if err := d.LogData(nil /* data */, Sync); err != nil {
     164           0 :                         return err
     165           0 :                 }
     166             :         }
     167             : 
     168             :         // Disable file deletions.
     169           1 :         d.mu.Lock()
     170           1 :         d.disableFileDeletions()
     171           1 :         defer func() {
     172           1 :                 d.mu.Lock()
     173           1 :                 defer d.mu.Unlock()
     174           1 :                 d.enableFileDeletions()
     175           1 :         }()
     176             : 
     177             :         // TODO(peter): RocksDB provides the option to roll the manifest if the
     178             :         // MANIFEST size is too large. Should we do this too?
     179             : 
     180             :         // Lock the manifest before getting the current version. We need the
     181             :         // length of the manifest that we read to match the current version that
     182             :         // we read, otherwise we might copy a versionEdit not reflected in the
     183             :         // sstables we copy/link.
     184           1 :         d.mu.versions.logLock()
     185           1 :         // Get the unflushed log files, the current version, and the current manifest
     186           1 :         // file number.
     187           1 :         memQueue := d.mu.mem.queue
     188           1 :         current := d.mu.versions.currentVersion()
     189           1 :         formatVers := d.FormatMajorVersion()
     190           1 :         manifestFileNum := d.mu.versions.manifestFileNum
     191           1 :         manifestSize := d.mu.versions.manifest.Size()
     192           1 :         optionsFileNum := d.optionsFileNum
     193           1 : 
     194           1 :         virtualBackingFiles := make(map[base.DiskFileNum]struct{})
     195           1 :         d.mu.versions.virtualBackings.ForEach(func(backing *fileBacking) {
     196           1 :                 virtualBackingFiles[backing.DiskFileNum] = struct{}{}
     197           1 :         })
     198             : 
     199           1 :         queuedLogNums := make([]wal.NumWAL, 0, len(memQueue))
     200           1 :         for i := range memQueue {
     201           1 :                 if logNum := memQueue[i].logNum; logNum != 0 {
     202           1 :                         queuedLogNums = append(queuedLogNums, wal.NumWAL(logNum))
     203           1 :                 }
     204             :         }
     205             :         // Release the manifest and DB.mu so we don't block other operations on
     206             :         // the database.
     207           1 :         d.mu.versions.logUnlock()
     208           1 :         d.mu.Unlock()
     209           1 : 
     210           1 :         allLogicalLogs, err := d.mu.log.manager.List()
     211           1 :         if err != nil {
     212           0 :                 return err
     213           0 :         }
     214             : 
     215             :         // Wrap the normal filesystem with one which wraps newly created files with
     216             :         // vfs.NewSyncingFile.
     217           1 :         fs := vfs.NewSyncingFS(d.opts.FS, vfs.SyncingFileOptions{
     218           1 :                 NoSyncOnClose: d.opts.NoSyncOnClose,
     219           1 :                 BytesPerSync:  d.opts.BytesPerSync,
     220           1 :         })
     221           1 : 
     222           1 :         // Create the dir and its parents (if necessary), and sync them.
     223           1 :         var dir vfs.File
     224           1 :         defer func() {
     225           1 :                 if dir != nil {
     226           0 :                         _ = dir.Close()
     227           0 :                 }
     228           1 :                 if ckErr != nil {
     229           0 :                         // Attempt to cleanup on error.
     230           0 :                         _ = fs.RemoveAll(destDir)
     231           0 :                 }
     232             :         }()
     233           1 :         dir, ckErr = mkdirAllAndSyncParents(fs, destDir)
     234           1 :         if ckErr != nil {
     235           0 :                 return ckErr
     236           0 :         }
     237             : 
     238           1 :         {
     239           1 :                 // Link or copy the OPTIONS.
     240           1 :                 srcPath := base.MakeFilepath(fs, d.dirname, fileTypeOptions, optionsFileNum)
     241           1 :                 destPath := fs.PathJoin(destDir, fs.PathBase(srcPath))
     242           1 :                 ckErr = vfs.LinkOrCopy(fs, srcPath, destPath)
     243           1 :                 if ckErr != nil {
     244           0 :                         return ckErr
     245           0 :                 }
     246             :         }
     247             : 
     248           1 :         {
     249           1 :                 // Set the format major version in the destination directory.
     250           1 :                 var versionMarker *atomicfs.Marker
     251           1 :                 versionMarker, _, ckErr = atomicfs.LocateMarker(fs, destDir, formatVersionMarkerName)
     252           1 :                 if ckErr != nil {
     253           0 :                         return ckErr
     254           0 :                 }
     255             : 
     256             :                 // We use the marker to encode the active format version in the
     257             :                 // marker filename. Unlike other uses of the atomic marker,
     258             :                 // there is no file with the filename `formatVers.String()` on
     259             :                 // the filesystem.
     260           1 :                 ckErr = versionMarker.Move(formatVers.String())
     261           1 :                 if ckErr != nil {
     262           0 :                         return ckErr
     263           0 :                 }
     264           1 :                 ckErr = versionMarker.Close()
     265           1 :                 if ckErr != nil {
     266           0 :                         return ckErr
     267           0 :                 }
     268             :         }
     269             : 
     270           1 :         var excludedFiles map[deletedFileEntry]*fileMetadata
     271           1 :         // Set of FileBacking.DiskFileNum which will be required by virtual sstables
     272           1 :         // in the checkpoint.
     273           1 :         requiredVirtualBackingFiles := make(map[base.DiskFileNum]struct{})
     274           1 :         // Link or copy the sstables.
     275           1 :         for l := range current.Levels {
     276           1 :                 iter := current.Levels[l].Iter()
     277           1 :                 for f := iter.First(); f != nil; f = iter.Next() {
     278           1 :                         if excludeFromCheckpoint(f, opt, d.cmp) {
     279           1 :                                 if excludedFiles == nil {
     280           1 :                                         excludedFiles = make(map[deletedFileEntry]*fileMetadata)
     281           1 :                                 }
     282           1 :                                 excludedFiles[deletedFileEntry{
     283           1 :                                         Level:   l,
     284           1 :                                         FileNum: f.FileNum,
     285           1 :                                 }] = f
     286           1 :                                 continue
     287             :                         }
     288             : 
     289           1 :                         fileBacking := f.FileBacking
     290           1 :                         if f.Virtual {
     291           1 :                                 if _, ok := requiredVirtualBackingFiles[fileBacking.DiskFileNum]; ok {
     292           1 :                                         continue
     293             :                                 }
     294           1 :                                 requiredVirtualBackingFiles[fileBacking.DiskFileNum] = struct{}{}
     295             :                         }
     296             : 
     297           1 :                         srcPath := base.MakeFilepath(fs, d.dirname, fileTypeTable, fileBacking.DiskFileNum)
     298           1 :                         destPath := fs.PathJoin(destDir, fs.PathBase(srcPath))
     299           1 :                         ckErr = vfs.LinkOrCopy(fs, srcPath, destPath)
     300           1 :                         if ckErr != nil {
     301           0 :                                 return ckErr
     302           0 :                         }
     303             :                 }
     304             :         }
     305             : 
     306           1 :         var removeBackingTables []base.DiskFileNum
     307           1 :         for diskFileNum := range virtualBackingFiles {
     308           1 :                 if _, ok := requiredVirtualBackingFiles[diskFileNum]; !ok {
     309           1 :                         // The backing sstable associated with fileNum is no longer
     310           1 :                         // required.
     311           1 :                         removeBackingTables = append(removeBackingTables, diskFileNum)
     312           1 :                 }
     313             :         }
     314             : 
     315           1 :         ckErr = d.writeCheckpointManifest(
     316           1 :                 fs, formatVers, destDir, dir, manifestFileNum, manifestSize,
     317           1 :                 excludedFiles, removeBackingTables,
     318           1 :         )
     319           1 :         if ckErr != nil {
     320           0 :                 return ckErr
     321           0 :         }
     322             : 
     323             :         // Copy the WAL files. We copy rather than link because WAL file recycling
     324             :         // will cause the WAL files to be reused which would invalidate the
     325             :         // checkpoint.
     326           1 :         for _, logNum := range queuedLogNums {
     327           1 :                 log, ok := allLogicalLogs.Get(logNum)
     328           1 :                 if !ok {
     329           0 :                         return errors.Newf("log %s not found", logNum)
     330           0 :                 }
     331           1 :                 for i := 0; i < log.NumSegments(); i++ {
     332           1 :                         srcFS, srcPath := log.SegmentLocation(i)
     333           1 :                         destPath := fs.PathJoin(destDir, srcFS.PathBase(srcPath))
     334           1 :                         ckErr = vfs.CopyAcrossFS(srcFS, srcPath, fs, destPath)
     335           1 :                         if ckErr != nil {
     336           0 :                                 return ckErr
     337           0 :                         }
     338             :                 }
     339             :         }
     340             : 
     341             :         // Sync and close the checkpoint directory.
     342           1 :         ckErr = dir.Sync()
     343           1 :         if ckErr != nil {
     344           0 :                 return ckErr
     345           0 :         }
     346           1 :         ckErr = dir.Close()
     347           1 :         dir = nil
     348           1 :         return ckErr
     349             : }
     350             : 
     351             : func (d *DB) writeCheckpointManifest(
     352             :         fs vfs.FS,
     353             :         formatVers FormatMajorVersion,
     354             :         destDirPath string,
     355             :         destDir vfs.File,
     356             :         manifestFileNum base.DiskFileNum,
     357             :         manifestSize int64,
     358             :         excludedFiles map[deletedFileEntry]*fileMetadata,
     359             :         removeBackingTables []base.DiskFileNum,
     360           1 : ) error {
     361           1 :         // Copy the MANIFEST, and create a pointer to it. We copy rather
     362           1 :         // than link because additional version edits added to the
     363           1 :         // MANIFEST after we took our snapshot of the sstables will
     364           1 :         // reference sstables that aren't in our checkpoint. For a
     365           1 :         // similar reason, we need to limit how much of the MANIFEST we
     366           1 :         // copy.
     367           1 :         // If some files are excluded from the checkpoint, also append a block that
     368           1 :         // records those files as deleted.
     369           1 :         if err := func() error {
     370           1 :                 srcPath := base.MakeFilepath(fs, d.dirname, fileTypeManifest, manifestFileNum)
     371           1 :                 destPath := fs.PathJoin(destDirPath, fs.PathBase(srcPath))
     372           1 :                 src, err := fs.Open(srcPath, vfs.SequentialReadsOption)
     373           1 :                 if err != nil {
     374           0 :                         return err
     375           0 :                 }
     376           1 :                 defer src.Close()
     377           1 : 
     378           1 :                 dst, err := fs.Create(destPath)
     379           1 :                 if err != nil {
     380           0 :                         return err
     381           0 :                 }
     382           1 :                 defer dst.Close()
     383           1 : 
     384           1 :                 // Copy all existing records. We need to copy at the record level in case we
     385           1 :                 // need to append another record with the excluded files (we cannot simply
     386           1 :                 // append a record after a raw data copy; see
     387           1 :                 // https://github.com/cockroachdb/cockroach/issues/100935).
     388           1 :                 r := record.NewReader(&io.LimitedReader{R: src, N: manifestSize}, manifestFileNum)
     389           1 :                 w := record.NewWriter(dst)
     390           1 :                 for {
     391           1 :                         rr, err := r.Next()
     392           1 :                         if err != nil {
     393           1 :                                 if err == io.EOF {
     394           1 :                                         break
     395             :                                 }
     396           0 :                                 return err
     397             :                         }
     398             : 
     399           1 :                         rw, err := w.Next()
     400           1 :                         if err != nil {
     401           0 :                                 return err
     402           0 :                         }
     403           1 :                         if _, err := io.Copy(rw, rr); err != nil {
     404           0 :                                 return err
     405           0 :                         }
     406             :                 }
     407             : 
     408           1 :                 if len(excludedFiles) > 0 {
     409           1 :                         // Write out an additional VersionEdit that deletes the excluded SST files.
     410           1 :                         ve := versionEdit{
     411           1 :                                 DeletedFiles:         excludedFiles,
     412           1 :                                 RemovedBackingTables: removeBackingTables,
     413           1 :                         }
     414           1 : 
     415           1 :                         rw, err := w.Next()
     416           1 :                         if err != nil {
     417           0 :                                 return err
     418           0 :                         }
     419           1 :                         if err := ve.Encode(rw); err != nil {
     420           0 :                                 return err
     421           0 :                         }
     422             :                 }
     423           1 :                 if err := w.Close(); err != nil {
     424           0 :                         return err
     425           0 :                 }
     426           1 :                 return dst.Sync()
     427           0 :         }(); err != nil {
     428           0 :                 return err
     429           0 :         }
     430             : 
     431           1 :         var manifestMarker *atomicfs.Marker
     432           1 :         manifestMarker, _, err := atomicfs.LocateMarker(fs, destDirPath, manifestMarkerName)
     433           1 :         if err != nil {
     434           0 :                 return err
     435           0 :         }
     436           1 :         if err := manifestMarker.Move(base.MakeFilename(fileTypeManifest, manifestFileNum)); err != nil {
     437           0 :                 return err
     438           0 :         }
     439           1 :         return manifestMarker.Close()
     440             : }

Generated by: LCOV version 1.14