LCOV - code coverage report
Current view: top level - pebble - checkpoint.go (source / functions)
Test:       2024-08-08 08:17Z 3419a647 - meta test only.lcov
Date:       2024-08-08 08:17:48

                 Hit    Total    Coverage
Lines:           212      303      70.0 %
Functions:         0        0           -

          Line data    Source code
       1             : // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
       2             : // of this source code is governed by a BSD-style license that can be found in
       3             : // the LICENSE file.
       4             : 
       5             : package pebble
       6             : 
       7             : import (
       8             :         "io"
       9             :         "os"
      10             : 
      11             :         "github.com/cockroachdb/errors"
      12             :         "github.com/cockroachdb/errors/oserror"
      13             :         "github.com/cockroachdb/pebble/internal/base"
      14             :         "github.com/cockroachdb/pebble/record"
      15             :         "github.com/cockroachdb/pebble/vfs"
      16             :         "github.com/cockroachdb/pebble/vfs/atomicfs"
      17             :         "github.com/cockroachdb/pebble/wal"
      18             : )
      19             : 
      20             : // checkpointOptions holds the optional parameters to construct checkpoint
      21             : // snapshots.
      22             : type checkpointOptions struct {
      23             :         // flushWAL set to true will force a flush and sync of the WAL prior to
      24             :         // checkpointing.
      25             :         flushWAL bool
      26             : 
      27             :         // If set, any SSTs that don't overlap with these spans are excluded from a checkpoint.
      28             :         restrictToSpans []CheckpointSpan
      29             : }
      30             : 
      31             : // CheckpointOption sets optional parameters used by `DB.Checkpoint`.
      32             : type CheckpointOption func(*checkpointOptions)
      33             : 
      34             : // WithFlushedWAL enables flushing and syncing the WAL prior to constructing a
      35             : // checkpoint. This guarantees that any writes committed before calling
      36             : // DB.Checkpoint will be part of that checkpoint.
      37             : //
      38             : // Note that this setting can only be useful in cases when some writes are
      39             : // performed with Sync = false. Otherwise, the guarantee will already be met.
      40             : //
      41             : // Passing this option is functionally equivalent to calling
      42             : // DB.LogData(nil, Sync) right before DB.Checkpoint.
      43           0 : func WithFlushedWAL() CheckpointOption {
      44           0 :         return func(opt *checkpointOptions) {
      45           0 :                 opt.flushWAL = true
      46           0 :         }
      47             : }
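
A minimal usage sketch (editorial, not part of checkpoint.go): a caller that commits writes with Sync disabled and wants them captured in the checkpoint could pass this option. The helper name, key, and destination directory are illustrative assumptions; db is assumed to be an open *pebble.DB from github.com/cockroachdb/pebble.

        // checkpointWithFlushedWAL is a hypothetical helper, not part of Pebble.
        func checkpointWithFlushedWAL(db *pebble.DB, destDir string) error {
                // A write committed without syncing the WAL...
                if err := db.Set([]byte("k"), []byte("v"), pebble.NoSync); err != nil {
                        return err
                }
                // ...is still guaranteed to appear in the checkpoint, because
                // WithFlushedWAL flushes and syncs the WAL first.
                return db.Checkpoint(destDir, pebble.WithFlushedWAL())
        }
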
      48             : 
      49             : // WithRestrictToSpans specifies spans of interest for the checkpoint. Any SSTs
      50             : // that don't overlap with any of these spans are excluded from the checkpoint.
      51             : //
      52             : // Note that the checkpoint can still surface keys outside of these spans (from
      53             : // the WAL and from SSTs that partially overlap with these spans). Moreover,
      54             : // such keys aren't necessarily up to date: a key may have been modified while
      55             : // the SST containing that modification is excluded from the checkpoint.
      56           1 : func WithRestrictToSpans(spans []CheckpointSpan) CheckpointOption {
      57           1 :         return func(opt *checkpointOptions) {
      58           1 :                 opt.restrictToSpans = spans
      59           1 :         }
      60             : }
      61             : 
      62             : // CheckpointSpan is a key range [Start, End) (inclusive on Start, exclusive on
      63             : // End) of interest for a checkpoint.
      64             : type CheckpointSpan struct {
      65             :         Start []byte
      66             :         End   []byte
      67             : }
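
A short sketch (editorial) of restricting a checkpoint to spans of interest; the helper name and key ranges are illustrative assumptions, and db is assumed to be an open *pebble.DB.

        // checkpointSpansOfInterest is a hypothetical helper, not part of Pebble.
        func checkpointSpansOfInterest(db *pebble.DB, destDir string) error {
                spans := []pebble.CheckpointSpan{
                        {Start: []byte("a"), End: []byte("m")},
                        {Start: []byte("t"), End: []byte("z")},
                }
                // SSTs overlapping none of these spans are omitted; keys outside the
                // spans can still appear (see the comment on WithRestrictToSpans).
                return db.Checkpoint(destDir, pebble.WithRestrictToSpans(spans))
        }
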
      68             : 
      69             : // excludeFromCheckpoint returns true if an SST file should be excluded from the
      70             : // checkpoint because it does not overlap with the spans of interest
      71             : // (opt.restrictToSpans).
      72           1 : func excludeFromCheckpoint(f *fileMetadata, opt *checkpointOptions, cmp Compare) bool {
      73           1 :         if len(opt.restrictToSpans) == 0 {
      74           1 :                 // Option not set; don't exclude anything.
      75           1 :                 return false
      76           1 :         }
      77           1 :         for _, s := range opt.restrictToSpans {
      78           1 :                 spanBounds := base.UserKeyBoundsEndExclusive(s.Start, s.End)
      79           1 :                 if f.Overlaps(cmp, &spanBounds) {
      80           1 :                         return false
      81           1 :                 }
      82             :         }
      83             :         // None of the restrictToSpans overlapped; we can exclude this file.
      84           1 :         return true
      85             : }
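
The same exclusion rule, restated as a self-contained editorial sketch over plain byte-slice bounds: a file is excluded only when it overlaps none of the requested spans. It assumes the standard bytes package, the default bytewise comparer, and ignores fileMetadata details such as exclusive largest boundaries.

        // overlapsSpan reports whether a file key range [smallest, largest]
        // intersects a span [start, end). Simplified relative to fileMetadata,
        // whose largest boundary can itself be exclusive.
        func overlapsSpan(smallest, largest, start, end []byte) bool {
                return bytes.Compare(smallest, end) < 0 && bytes.Compare(start, largest) <= 0
        }

        // excludedFromCheckpoint mirrors the rule above: with no spans configured
        // nothing is excluded; otherwise a file is excluded iff it overlaps no span.
        func excludedFromCheckpoint(smallest, largest []byte, spans []pebble.CheckpointSpan) bool {
                if len(spans) == 0 {
                        return false
                }
                for _, s := range spans {
                        if overlapsSpan(smallest, largest, s.Start, s.End) {
                                return false
                        }
                }
                return true
        }
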
      86             : 
      87             : // mkdirAllAndSyncParents creates destDir and any of its missing parents.
      88             : // Those missing parents, as well as the closest existing ancestor, are synced.
      89             : // Returns a handle to the directory created at destDir.
      90           1 : func mkdirAllAndSyncParents(fs vfs.FS, destDir string) (vfs.File, error) {
      91           1 :         // Collect paths for all directories between destDir (excluded) and its
      92           1 :         // closest existing ancestor (included).
      93           1 :         var parentPaths []string
      94           1 :         foundExistingAncestor := false
      95           1 :         for parentPath := fs.PathDir(destDir); parentPath != "."; parentPath = fs.PathDir(parentPath) {
      96           1 :                 parentPaths = append(parentPaths, parentPath)
      97           1 :                 _, err := fs.Stat(parentPath)
      98           1 :                 if err == nil {
      99           1 :                         // Exit loop at the closest existing ancestor.
     100           1 :                         foundExistingAncestor = true
     101           1 :                         break
     102             :                 }
     103           1 :                 if !oserror.IsNotExist(err) {
     104           0 :                         return nil, err
     105           0 :                 }
     106             :         }
     107             :         // Handle empty filesystem edge case.
     108           1 :         if !foundExistingAncestor {
     109           1 :                 parentPaths = append(parentPaths, "")
     110           1 :         }
     111             :         // Create destDir and any of its missing parents.
     112           1 :         if err := fs.MkdirAll(destDir, 0755); err != nil {
     113           0 :                 return nil, err
     114           0 :         }
     115             :         // Sync all the parent directories up to the closest existing ancestor,
     116             :         // included.
     117           1 :         for _, parentPath := range parentPaths {
     118           1 :                 parentDir, err := fs.OpenDir(parentPath)
     119           1 :                 if err != nil {
     120           0 :                         return nil, err
     121           0 :                 }
     122           1 :                 err = parentDir.Sync()
     123           1 :                 if err != nil {
     124           0 :                         _ = parentDir.Close()
     125           0 :                         return nil, err
     126           0 :                 }
     127           1 :                 err = parentDir.Close()
     128           1 :                 if err != nil {
     129           0 :                         return nil, err
     130           0 :                 }
     131             :         }
     132           1 :         return fs.OpenDir(destDir)
     133             : }
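
A minimal sketch of exercising this helper from within the package (for example in a test) on an in-memory filesystem; the test name and path are illustrative assumptions, not part of Pebble's test suite.

        // Hypothetical in-package test using the in-memory vfs.
        func TestMkdirAllAndSyncParentsSketch(t *testing.T) {
                memFS := vfs.NewMem()
                // "a" and "a/b" do not exist yet; they are created and synced,
                // along with the existing root, and a handle to "a/b/c" is returned.
                dir, err := mkdirAllAndSyncParents(memFS, "a/b/c")
                if err != nil {
                        t.Fatal(err)
                }
                if err := dir.Close(); err != nil {
                        t.Fatal(err)
                }
        }
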
     134             : 
     135             : // Checkpoint constructs a snapshot of the DB instance in the specified
     136             : // directory. The WAL, MANIFEST, OPTIONS, and sstables will be copied into the
     137             : // snapshot. Hard links will be used when possible. Beware of the significant
     138             : // space overhead for a checkpoint if hard links are disabled. Also beware that
     139             : // even if hard links are used, the space overhead for the checkpoint will
     140             : // increase over time as the DB performs compactions.
     141             : //
     142             : // Note that shared files in a checkpoint could get deleted if the DB is
     143             : // restarted after a checkpoint operation, as the reference for the checkpoint
     144             : // is only maintained in memory. This is okay as long as users of Checkpoint
     145             : // crash shortly afterwards with a "poison file" preventing further restarts.
     146             : func (d *DB) Checkpoint(
     147             :         destDir string, opts ...CheckpointOption,
     148             : ) (
     149             :         ckErr error, /* used in deferred cleanup */
     150           1 : ) {
     151           1 :         opt := &checkpointOptions{}
     152           1 :         for _, fn := range opts {
     153           1 :                 fn(opt)
     154           1 :         }
     155             : 
     156           1 :         if _, err := d.opts.FS.Stat(destDir); !oserror.IsNotExist(err) {
     157           0 :                 if err == nil {
     158           0 :                         return &os.PathError{
     159           0 :                                 Op:   "checkpoint",
     160           0 :                                 Path: destDir,
     161           0 :                                 Err:  oserror.ErrExist,
     162           0 :                         }
     163           0 :                 }
     164           0 :                 return err
     165             :         }
     166             : 
     167           1 :         if opt.flushWAL && !d.opts.DisableWAL {
     168           0 :                 // Write an empty log-data record to flush and sync the WAL.
     169           0 :                 if err := d.LogData(nil /* data */, Sync); err != nil {
     170           0 :                         return err
     171           0 :                 }
     172             :         }
     173             : 
     174             :         // Disable file deletions.
     175           1 :         d.mu.Lock()
     176           1 :         d.disableFileDeletions()
     177           1 :         defer func() {
     178           1 :                 d.mu.Lock()
     179           1 :                 defer d.mu.Unlock()
     180           1 :                 d.enableFileDeletions()
     181           1 :         }()
     182             : 
     183             :         // TODO(peter): RocksDB provides the option to roll the manifest if the
     184             :         // MANIFEST size is too large. Should we do this too?
     185             : 
     186             :         // Lock the manifest before getting the current version. We need the
     187             :         // length of the manifest that we read to match the current version that
     188             :         // we read, otherwise we might copy a versionEdit not reflected in the
     189             :         // sstables we copy/link.
     190           1 :         d.mu.versions.logLock()
     191           1 :         // Get the unflushed log files, the current version, and the current manifest
     192           1 :         // file number.
     193           1 :         memQueue := d.mu.mem.queue
     194           1 :         current := d.mu.versions.currentVersion()
     195           1 :         formatVers := d.FormatMajorVersion()
     196           1 :         manifestFileNum := d.mu.versions.manifestFileNum
     197           1 :         manifestSize := d.mu.versions.manifest.Size()
     198           1 :         optionsFileNum := d.optionsFileNum
     199           1 : 
     200           1 :         virtualBackingFiles := make(map[base.DiskFileNum]struct{})
     201           1 :         d.mu.versions.virtualBackings.ForEach(func(backing *fileBacking) {
     202           1 :                 virtualBackingFiles[backing.DiskFileNum] = struct{}{}
     203           1 :         })
     204             : 
     205           1 :         queuedLogNums := make([]wal.NumWAL, 0, len(memQueue))
     206           1 :         for i := range memQueue {
     207           1 :                 if logNum := memQueue[i].logNum; logNum != 0 {
     208           1 :                         queuedLogNums = append(queuedLogNums, wal.NumWAL(logNum))
     209           1 :                 }
     210             :         }
     211             :         // Release the manifest and DB.mu so we don't block other operations on
     212             :         // the database.
     213           1 :         d.mu.versions.logUnlock()
     214           1 :         d.mu.Unlock()
     215           1 : 
     216           1 :         allLogicalLogs, err := d.mu.log.manager.List()
     217           1 :         if err != nil {
     218           0 :                 return err
     219           0 :         }
     220             : 
     221             :         // Wrap the normal filesystem with one which wraps newly created files with
     222             :         // vfs.NewSyncingFile.
     223           1 :         fs := vfs.NewSyncingFS(d.opts.FS, vfs.SyncingFileOptions{
     224           1 :                 NoSyncOnClose: d.opts.NoSyncOnClose,
     225           1 :                 BytesPerSync:  d.opts.BytesPerSync,
     226           1 :         })
     227           1 : 
     228           1 :         // Create the dir and its parents (if necessary), and sync them.
     229           1 :         var dir vfs.File
     230           1 :         defer func() {
     231           1 :                 if dir != nil {
     232           0 :                         _ = dir.Close()
     233           0 :                 }
     234           1 :                 if ckErr != nil {
     235           0 :                         // Attempt to cleanup on error.
     236           0 :                         _ = fs.RemoveAll(destDir)
     237           0 :                 }
     238             :         }()
     239           1 :         dir, ckErr = mkdirAllAndSyncParents(fs, destDir)
     240           1 :         if ckErr != nil {
     241           0 :                 return ckErr
     242           0 :         }
     243             : 
     244           1 :         {
     245           1 :                 // Link or copy the OPTIONS.
     246           1 :                 srcPath := base.MakeFilepath(fs, d.dirname, fileTypeOptions, optionsFileNum)
     247           1 :                 destPath := fs.PathJoin(destDir, fs.PathBase(srcPath))
     248           1 :                 ckErr = vfs.LinkOrCopy(fs, srcPath, destPath)
     249           1 :                 if ckErr != nil {
     250           0 :                         return ckErr
     251           0 :                 }
     252             :         }
     253             : 
     254           1 :         {
     255           1 :                 // Set the format major version in the destination directory.
     256           1 :                 var versionMarker *atomicfs.Marker
     257           1 :                 versionMarker, _, ckErr = atomicfs.LocateMarker(fs, destDir, formatVersionMarkerName)
     258           1 :                 if ckErr != nil {
     259           0 :                         return ckErr
     260           0 :                 }
     261             : 
     262             :                 // We use the marker to encode the active format version in the
     263             :                 // marker filename. Unlike other uses of the atomic marker,
     264             :                 // there is no file with the filename `formatVers.String()` on
     265             :                 // the filesystem.
     266           1 :                 ckErr = versionMarker.Move(formatVers.String())
     267           1 :                 if ckErr != nil {
     268           0 :                         return ckErr
     269           0 :                 }
     270           1 :                 ckErr = versionMarker.Close()
     271           1 :                 if ckErr != nil {
     272           0 :                         return ckErr
     273           0 :                 }
     274             :         }
     275             : 
     276           1 :         var excludedFiles map[deletedFileEntry]*fileMetadata
     277           1 :         var remoteFiles []base.DiskFileNum
     278           1 :         // Set of FileBacking.DiskFileNum which will be required by virtual sstables
     279           1 :         // in the checkpoint.
     280           1 :         requiredVirtualBackingFiles := make(map[base.DiskFileNum]struct{})
     281           1 :         // Link or copy the sstables.
     282           1 :         for l := range current.Levels {
     283           1 :                 iter := current.Levels[l].Iter()
     284           1 :                 for f := iter.First(); f != nil; f = iter.Next() {
     285           1 :                         if excludeFromCheckpoint(f, opt, d.cmp) {
     286           1 :                                 if excludedFiles == nil {
     287           1 :                                         excludedFiles = make(map[deletedFileEntry]*fileMetadata)
     288           1 :                                 }
     289           1 :                                 excludedFiles[deletedFileEntry{
     290           1 :                                         Level:   l,
     291           1 :                                         FileNum: f.FileNum,
     292           1 :                                 }] = f
     293           1 :                                 continue
     294             :                         }
     295             : 
     296           1 :                         fileBacking := f.FileBacking
     297           1 :                         if f.Virtual {
     298           1 :                                 if _, ok := requiredVirtualBackingFiles[fileBacking.DiskFileNum]; ok {
     299           1 :                                         continue
     300             :                                 }
     301           1 :                                 requiredVirtualBackingFiles[fileBacking.DiskFileNum] = struct{}{}
     302             :                         }
     303           1 :                         meta, err := d.objProvider.Lookup(fileTypeTable, fileBacking.DiskFileNum)
     304           1 :                         if err != nil {
     305           0 :                                 ckErr = err
     306           0 :                                 return ckErr
     307           0 :                         }
     308           1 :                         if meta.IsRemote() {
     309           0 :                                 // We don't copy remote files. This is desirable as checkpointing is
     310           0 :                                 // supposed to be a fast operation, and references to remote files can
     311           0 :                                 // always be resolved by any checkpoint readers by reading the object
     312           0 :                                 // catalog. We don't add this file to excludedFiles either, as that'd
     313           0 :                                 // cause it to be deleted in the second manifest entry which is also
     314           0 :                                 // inaccurate.
     315           0 :                                 remoteFiles = append(remoteFiles, meta.DiskFileNum)
     316           0 :                                 continue
     317             :                         }
     318             : 
     319           1 :                         srcPath := base.MakeFilepath(fs, d.dirname, fileTypeTable, fileBacking.DiskFileNum)
     320           1 :                         destPath := fs.PathJoin(destDir, fs.PathBase(srcPath))
     321           1 :                         ckErr = vfs.LinkOrCopy(fs, srcPath, destPath)
     322           1 :                         if ckErr != nil {
     323           0 :                                 return ckErr
     324           0 :                         }
     325             :                 }
     326             :         }
     327             : 
     328           1 :         var removeBackingTables []base.DiskFileNum
     329           1 :         for diskFileNum := range virtualBackingFiles {
     330           1 :                 if _, ok := requiredVirtualBackingFiles[diskFileNum]; !ok {
     331           1 :                         // The backing sstable associated with fileNum is no longer
     332           1 :                         // required.
     333           1 :                         removeBackingTables = append(removeBackingTables, diskFileNum)
     334           1 :                 }
     335             :         }
     336             : 
     337           1 :         ckErr = d.writeCheckpointManifest(
     338           1 :                 fs, formatVers, destDir, dir, manifestFileNum, manifestSize,
     339           1 :                 excludedFiles, removeBackingTables,
     340           1 :         )
     341           1 :         if ckErr != nil {
     342           0 :                 return ckErr
     343           0 :         }
     344           1 :         if len(remoteFiles) > 0 {
     345           0 :                 ckErr = d.objProvider.CheckpointState(fs, destDir, fileTypeTable, remoteFiles)
     346           0 :                 if ckErr != nil {
     347           0 :                         return ckErr
     348           0 :                 }
     349             :         }
     350             : 
     351             :         // Copy the WAL files. We copy rather than link because WAL file recycling
     352             :         // will cause the WAL files to be reused which would invalidate the
     353             :         // checkpoint.
     354           1 :         for _, logNum := range queuedLogNums {
     355           1 :                 log, ok := allLogicalLogs.Get(logNum)
     356           1 :                 if !ok {
     357           0 :                         return errors.Newf("log %s not found", logNum)
     358           0 :                 }
     359           1 :                 for i := 0; i < log.NumSegments(); i++ {
     360           1 :                         srcFS, srcPath := log.SegmentLocation(i)
     361           1 :                         destPath := fs.PathJoin(destDir, srcFS.PathBase(srcPath))
     362           1 :                         ckErr = vfs.CopyAcrossFS(srcFS, srcPath, fs, destPath)
     363           1 :                         if ckErr != nil {
     364           0 :                                 return ckErr
     365           0 :                         }
     366             :                 }
     367             :         }
     368             : 
     369             :         // Sync and close the checkpoint directory.
     370           1 :         ckErr = dir.Sync()
     371           1 :         if ckErr != nil {
     372           0 :                 return ckErr
     373           0 :         }
     374           1 :         ckErr = dir.Close()
     375           1 :         dir = nil
     376           1 :         return ckErr
     377             : }
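
For context, a self-contained editorial sketch of the public checkpoint API exercised end to end on an in-memory filesystem; the directory names and keys are illustrative assumptions, not values taken from the listing above.

        package main

        import (
                "log"

                "github.com/cockroachdb/pebble"
                "github.com/cockroachdb/pebble/vfs"
        )

        func main() {
                fs := vfs.NewMem()
                db, err := pebble.Open("demo", &pebble.Options{FS: fs})
                if err != nil {
                        log.Fatal(err)
                }
                defer db.Close()

                // A write committed without syncing the WAL.
                if err := db.Set([]byte("a"), []byte("1"), pebble.NoSync); err != nil {
                        log.Fatal(err)
                }
                // WithFlushedWAL syncs the WAL first, so the write above is captured.
                if err := db.Checkpoint("demo-ckpt", pebble.WithFlushedWAL()); err != nil {
                        log.Fatal(err)
                }
                // The checkpoint directory is itself an openable Pebble database.
                ckpt, err := pebble.Open("demo-ckpt", &pebble.Options{FS: fs})
                if err != nil {
                        log.Fatal(err)
                }
                defer ckpt.Close()
        }
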
     378             : 
     379             : func (d *DB) writeCheckpointManifest(
     380             :         fs vfs.FS,
     381             :         formatVers FormatMajorVersion,
     382             :         destDirPath string,
     383             :         destDir vfs.File,
     384             :         manifestFileNum base.DiskFileNum,
     385             :         manifestSize int64,
     386             :         excludedFiles map[deletedFileEntry]*fileMetadata,
     387             :         removeBackingTables []base.DiskFileNum,
     388           1 : ) error {
     389           1 :         // Copy the MANIFEST, and create a pointer to it. We copy rather
     390           1 :         // than link because additional version edits added to the
     391           1 :         // MANIFEST after we took our snapshot of the sstables will
     392           1 :         // reference sstables that aren't in our checkpoint. For a
     393           1 :         // similar reason, we need to limit how much of the MANIFEST we
     394           1 :         // copy.
     395           1 :         // If some files are excluded from the checkpoint, also append a block that
     396           1 :         // records those files as deleted.
     397           1 :         if err := func() error {
     398           1 :                 srcPath := base.MakeFilepath(fs, d.dirname, fileTypeManifest, manifestFileNum)
     399           1 :                 destPath := fs.PathJoin(destDirPath, fs.PathBase(srcPath))
     400           1 :                 src, err := fs.Open(srcPath, vfs.SequentialReadsOption)
     401           1 :                 if err != nil {
     402           0 :                         return err
     403           0 :                 }
     404           1 :                 defer src.Close()
     405           1 : 
     406           1 :                 dst, err := fs.Create(destPath, vfs.WriteCategoryUnspecified)
     407           1 :                 if err != nil {
     408           0 :                         return err
     409           0 :                 }
     410           1 :                 defer dst.Close()
     411           1 : 
     412           1 :                 // Copy all existing records. We need to copy at the record level in case we
     413           1 :                 // need to append another record with the excluded files (we cannot simply
     414           1 :                 // append a record after a raw data copy; see
     415           1 :                 // https://github.com/cockroachdb/cockroach/issues/100935).
     416           1 :                 r := record.NewReader(&io.LimitedReader{R: src, N: manifestSize}, manifestFileNum)
     417           1 :                 w := record.NewWriter(dst)
     418           1 :                 for {
     419           1 :                         rr, err := r.Next()
     420           1 :                         if err != nil {
     421           1 :                                 if err == io.EOF {
     422           1 :                                         break
     423             :                                 }
     424           0 :                                 return err
     425             :                         }
     426             : 
     427           1 :                         rw, err := w.Next()
     428           1 :                         if err != nil {
     429           0 :                                 return err
     430           0 :                         }
     431           1 :                         if _, err := io.Copy(rw, rr); err != nil {
     432           0 :                                 return err
     433           0 :                         }
     434             :                 }
     435             : 
     436           1 :                 if len(excludedFiles) > 0 {
     437           1 :                         // Write out an additional VersionEdit that deletes the excluded SST files.
     438           1 :                         ve := versionEdit{
     439           1 :                                 DeletedFiles:         excludedFiles,
     440           1 :                                 RemovedBackingTables: removeBackingTables,
     441           1 :                         }
     442           1 : 
     443           1 :                         rw, err := w.Next()
     444           1 :                         if err != nil {
     445           0 :                                 return err
     446           0 :                         }
     447           1 :                         if err := ve.Encode(rw); err != nil {
     448           0 :                                 return err
     449           0 :                         }
     450             :                 }
     451           1 :                 if err := w.Close(); err != nil {
     452           0 :                         return err
     453           0 :                 }
     454           1 :                 return dst.Sync()
     455           0 :         }(); err != nil {
     456           0 :                 return err
     457           0 :         }
     458             : 
     459           1 :         var manifestMarker *atomicfs.Marker
     460           1 :         manifestMarker, _, err := atomicfs.LocateMarker(fs, destDirPath, manifestMarkerName)
     461           1 :         if err != nil {
     462           0 :                 return err
     463           0 :         }
     464           1 :         if err := manifestMarker.Move(base.MakeFilename(fileTypeManifest, manifestFileNum)); err != nil {
     465           0 :                 return err
     466           0 :         }
     467           1 :         return manifestMarker.Close()
     468             : }

Generated by: LCOV version 1.14