// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
    "io"
    "os"

    "github.com/cockroachdb/errors/oserror"
    "github.com/cockroachdb/pebble/internal/base"
    "github.com/cockroachdb/pebble/record"
    "github.com/cockroachdb/pebble/vfs"
    "github.com/cockroachdb/pebble/vfs/atomicfs"
)

// checkpointOptions holds the optional parameters used to construct checkpoint
// snapshots.
type checkpointOptions struct {
    // flushWAL, if true, forces a flush and sync of the WAL prior to
    // checkpointing.
    flushWAL bool

    // If set, any SSTs that don't overlap with these spans are excluded from
    // the checkpoint.
    restrictToSpans []CheckpointSpan
}

// CheckpointOption sets optional parameters used by DB.Checkpoint.
type CheckpointOption func(*checkpointOptions)

// WithFlushedWAL enables flushing and syncing the WAL prior to constructing a
// checkpoint. This guarantees that any writes committed before calling
// DB.Checkpoint will be part of that checkpoint.
//
// Note that this setting can only be useful in cases when some writes are
// performed with Sync = false. Otherwise, the guarantee will already be met.
//
// Passing this option is functionally equivalent to calling
// DB.LogData(nil, Sync) right before DB.Checkpoint.
func WithFlushedWAL() CheckpointOption {
    return func(opt *checkpointOptions) {
        opt.flushWAL = true
    }
}
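
// A minimal usage sketch (the destination path is hypothetical; assumes an
// open *DB named db):
//
//	if err := db.Checkpoint("/data/ck1", WithFlushedWAL()); err != nil {
//		return err
//	}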

// WithRestrictToSpans specifies spans of interest for the checkpoint. Any SSTs
// that don't overlap with any of these spans are excluded from the checkpoint.
//
// Note that the checkpoint can still surface keys outside of these spans (from
// the WAL and from SSTs that partially overlap with these spans). Moreover,
// keys surfaced this way aren't necessarily "valid": a key could have been
// modified, with the SST containing the modification excluded.
func WithRestrictToSpans(spans []CheckpointSpan) CheckpointOption {
    return func(opt *checkpointOptions) {
        opt.restrictToSpans = spans
    }
}
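
// A minimal usage sketch (the keys and path are hypothetical; assumes an open
// *DB named db):
//
//	spans := []CheckpointSpan{{Start: []byte("a"), End: []byte("m")}}
//	if err := db.Checkpoint("/data/ck2", WithRestrictToSpans(spans)); err != nil {
//		return err
//	}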

// CheckpointSpan is a key range [Start, End) (inclusive on Start, exclusive on
// End) of interest for a checkpoint.
type CheckpointSpan struct {
    Start []byte
    End   []byte
}

// excludeFromCheckpoint returns true if an SST file should be excluded from the
// checkpoint because it does not overlap with the spans of interest
// (opt.restrictToSpans).
func excludeFromCheckpoint(f *fileMetadata, opt *checkpointOptions, cmp Compare) bool {
    if len(opt.restrictToSpans) == 0 {
        // Option not set; don't exclude anything.
        return false
    }
    for _, s := range opt.restrictToSpans {
        if f.Overlaps(cmp, s.Start, s.End, true /* exclusiveEnd */) {
            return false
        }
    }
    // None of the restrictToSpans overlapped; we can exclude this file.
    return true
}
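
// A minimal illustration of the overlap semantics (the file bounds below are
// hypothetical; assumes a bytes.Compare-based comparer):
//
//	span := CheckpointSpan{Start: []byte("b"), End: []byte("d")}
//	// A file with bounds [a, c] overlaps [b, d) and is kept.
//	// A file with bounds [d, e] starts at the exclusive End and is excluded.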

// mkdirAllAndSyncParents creates destDir and any of its missing parents.
// Those missing parents, as well as the closest existing ancestor, are synced.
// Returns a handle to the directory created at destDir.
func mkdirAllAndSyncParents(fs vfs.FS, destDir string) (vfs.File, error) {
    // Collect paths for all directories between destDir (excluded) and its
    // closest existing ancestor (included).
    var parentPaths []string
    foundExistingAncestor := false
    for parentPath := fs.PathDir(destDir); parentPath != "."; parentPath = fs.PathDir(parentPath) {
        parentPaths = append(parentPaths, parentPath)
        _, err := fs.Stat(parentPath)
        if err == nil {
            // Exit loop at the closest existing ancestor.
            foundExistingAncestor = true
            break
        }
        if !oserror.IsNotExist(err) {
            return nil, err
        }
    }
    // Handle the empty-filesystem edge case.
    if !foundExistingAncestor {
        parentPaths = append(parentPaths, "")
    }
    // Create destDir and any of its missing parents.
    if err := fs.MkdirAll(destDir, 0755); err != nil {
        return nil, err
    }
    // Sync all the parent directories up to the closest existing ancestor,
    // included.
    for _, parentPath := range parentPaths {
        parentDir, err := fs.OpenDir(parentPath)
        if err != nil {
            return nil, err
        }
        err = parentDir.Sync()
        if err != nil {
            _ = parentDir.Close()
            return nil, err
        }
        err = parentDir.Close()
        if err != nil {
            return nil, err
        }
    }
    return fs.OpenDir(destDir)
}
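
// A minimal usage sketch (the path is hypothetical; vfs.Default is the OS
// filesystem):
//
//	dir, err := mkdirAllAndSyncParents(vfs.Default, "/data/checkpoints/ck1")
//	if err != nil {
//		return err
//	}
//	defer dir.Close()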

// Checkpoint constructs a snapshot of the DB instance in the specified
// directory. The WAL, MANIFEST, OPTIONS, and sstables will be copied into the
// snapshot. Hard links will be used when possible. Beware of the significant
// space overhead for a checkpoint if hard links are disabled. Also beware that
// even if hard links are used, the space overhead for the checkpoint will
// increase over time as the DB performs compactions.
func (d *DB) Checkpoint(
    destDir string, opts ...CheckpointOption,
) (
    ckErr error, /* used in deferred cleanup */
) {
    opt := &checkpointOptions{}
    for _, fn := range opts {
        fn(opt)
    }

    if _, err := d.opts.FS.Stat(destDir); !oserror.IsNotExist(err) {
        if err == nil {
            return &os.PathError{
                Op:   "checkpoint",
                Path: destDir,
                Err:  oserror.ErrExist,
            }
        }
        return err
    }

    if opt.flushWAL && !d.opts.DisableWAL {
        // Write an empty log-data record to flush and sync the WAL.
        if err := d.LogData(nil /* data */, Sync); err != nil {
            return err
        }
    }

    // Disable file deletions.
    d.mu.Lock()
    d.disableFileDeletions()
    defer func() {
        d.mu.Lock()
        defer d.mu.Unlock()
        d.enableFileDeletions()
    }()

    // TODO(peter): RocksDB provides the option to roll the manifest if the
    // MANIFEST size is too large. Should we do this too?

    // Lock the manifest before getting the current version. We need the
    // length of the manifest that we read to match the current version that
    // we read, otherwise we might copy a versionEdit not reflected in the
    // sstables we copy/link.
    d.mu.versions.logLock()
    // Get the unflushed log files, the current version, and the current manifest
    // file number.
    memQueue := d.mu.mem.queue
    current := d.mu.versions.currentVersion()
    formatVers := d.FormatMajorVersion()
    manifestFileNum := d.mu.versions.manifestFileNum
    manifestSize := d.mu.versions.manifest.Size()
    optionsFileNum := d.optionsFileNum
    virtualBackingFiles := make(map[base.DiskFileNum]struct{})
    for diskFileNum := range d.mu.versions.backingState.fileBackingMap {
        virtualBackingFiles[diskFileNum] = struct{}{}
    }
    // Release the manifest and DB.mu so we don't block other operations on
    // the database.
    d.mu.versions.logUnlock()
    d.mu.Unlock()

    // Wrap the normal filesystem with one which wraps newly created files with
    // vfs.NewSyncingFile.
    fs := vfs.NewSyncingFS(d.opts.FS, vfs.SyncingFileOptions{
        NoSyncOnClose: d.opts.NoSyncOnClose,
        BytesPerSync:  d.opts.BytesPerSync,
    })

    // Create the dir and its parents (if necessary), and sync them.
    var dir vfs.File
    defer func() {
        if dir != nil {
            _ = dir.Close()
        }
        if ckErr != nil {
            // Attempt to clean up on error.
            _ = fs.RemoveAll(destDir)
        }
    }()
    dir, ckErr = mkdirAllAndSyncParents(fs, destDir)
    if ckErr != nil {
        return ckErr
    }

    {
        // Link or copy the OPTIONS.
        srcPath := base.MakeFilepath(fs, d.dirname, fileTypeOptions, optionsFileNum)
        destPath := fs.PathJoin(destDir, fs.PathBase(srcPath))
        ckErr = vfs.LinkOrCopy(fs, srcPath, destPath)
        if ckErr != nil {
            return ckErr
        }
    }

    {
        // Set the format major version in the destination directory.
        var versionMarker *atomicfs.Marker
        versionMarker, _, ckErr = atomicfs.LocateMarker(fs, destDir, formatVersionMarkerName)
        if ckErr != nil {
            return ckErr
        }

        // We use the marker to encode the active format version in the
        // marker filename. Unlike other uses of the atomic marker,
        // there is no file with the filename `formatVers.String()` on
        // the filesystem.
        ckErr = versionMarker.Move(formatVers.String())
        if ckErr != nil {
            return ckErr
        }
        ckErr = versionMarker.Close()
        if ckErr != nil {
            return ckErr
        }
    }

    var excludedFiles map[deletedFileEntry]*fileMetadata
    // Set of FileBacking.DiskFileNum which will be required by virtual sstables
    // in the checkpoint.
    requiredVirtualBackingFiles := make(map[base.DiskFileNum]struct{})
    // Link or copy the sstables.
    for l := range current.Levels {
        iter := current.Levels[l].Iter()
        for f := iter.First(); f != nil; f = iter.Next() {
            if excludeFromCheckpoint(f, opt, d.cmp) {
                if excludedFiles == nil {
                    excludedFiles = make(map[deletedFileEntry]*fileMetadata)
                }
                excludedFiles[deletedFileEntry{
                    Level:   l,
                    FileNum: f.FileNum,
                }] = f
                continue
            }

            fileBacking := f.FileBacking
            if f.Virtual {
                if _, ok := requiredVirtualBackingFiles[fileBacking.DiskFileNum]; ok {
                    continue
                }
                requiredVirtualBackingFiles[fileBacking.DiskFileNum] = struct{}{}
            }

            srcPath := base.MakeFilepath(fs, d.dirname, fileTypeTable, fileBacking.DiskFileNum)
            destPath := fs.PathJoin(destDir, fs.PathBase(srcPath))
            ckErr = vfs.LinkOrCopy(fs, srcPath, destPath)
            if ckErr != nil {
                return ckErr
            }
        }
    }

    var removeBackingTables []base.DiskFileNum
    for diskFileNum := range virtualBackingFiles {
        if _, ok := requiredVirtualBackingFiles[diskFileNum]; !ok {
            // The backing sstable associated with diskFileNum is no longer
            // required.
            removeBackingTables = append(removeBackingTables, diskFileNum)
        }
    }

    ckErr = d.writeCheckpointManifest(
        fs, formatVers, destDir, dir, manifestFileNum, manifestSize,
        excludedFiles, removeBackingTables,
    )
    if ckErr != nil {
        return ckErr
    }

    // Copy the WAL files. We copy rather than link because WAL file recycling
    // will cause the WAL files to be reused, which would invalidate the
    // checkpoint.
    for i := range memQueue {
        logNum := memQueue[i].logNum
        if logNum == 0 {
            continue
        }
        srcPath := base.MakeFilepath(fs, d.walDirname, fileTypeLog, logNum)
        destPath := fs.PathJoin(destDir, fs.PathBase(srcPath))
        ckErr = vfs.Copy(fs, srcPath, destPath)
        if ckErr != nil {
            return ckErr
        }
    }

    // Sync and close the checkpoint directory.
    ckErr = dir.Sync()
    if ckErr != nil {
        return ckErr
    }
    ckErr = dir.Close()
    dir = nil
    return ckErr
}
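
// A minimal end-to-end sketch (the paths are hypothetical; assumes an open *DB
// named db): create a checkpoint, then open it as an independent read-only
// store.
//
//	if err := db.Checkpoint("/data/ck3", WithFlushedWAL()); err != nil {
//		return err
//	}
//	ck, err := Open("/data/ck3", &Options{ReadOnly: true})
//	if err != nil {
//		return err
//	}
//	defer ck.Close()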

func (d *DB) writeCheckpointManifest(
    fs vfs.FS,
    formatVers FormatMajorVersion,
    destDirPath string,
    destDir vfs.File,
    manifestFileNum base.DiskFileNum,
    manifestSize int64,
    excludedFiles map[deletedFileEntry]*fileMetadata,
    removeBackingTables []base.DiskFileNum,
) error {
    // Copy the MANIFEST, and create a pointer to it. We copy rather than link
    // because additional version edits added to the MANIFEST after we took our
    // snapshot of the sstables will reference sstables that aren't in our
    // checkpoint. For a similar reason, we need to limit how much of the
    // MANIFEST we copy.
    // If some files are excluded from the checkpoint, we also append a record
    // that marks those files as deleted.
    if err := func() error {
        srcPath := base.MakeFilepath(fs, d.dirname, fileTypeManifest, manifestFileNum)
        destPath := fs.PathJoin(destDirPath, fs.PathBase(srcPath))
        src, err := fs.Open(srcPath, vfs.SequentialReadsOption)
        if err != nil {
            return err
        }
        defer src.Close()

        dst, err := fs.Create(destPath)
        if err != nil {
            return err
        }
        defer dst.Close()

        // Copy all existing records. We need to copy at the record level in case we
        // need to append another record with the excluded files (we cannot simply
        // append a record after a raw data copy; see
        // https://github.com/cockroachdb/cockroach/issues/100935).
        r := record.NewReader(&io.LimitedReader{R: src, N: manifestSize}, manifestFileNum)
        w := record.NewWriter(dst)
        for {
            rr, err := r.Next()
            if err != nil {
                if err == io.EOF {
                    break
                }
                return err
            }

            rw, err := w.Next()
            if err != nil {
                return err
            }
            if _, err := io.Copy(rw, rr); err != nil {
                return err
            }
        }

        if len(excludedFiles) > 0 {
            // Write out an additional VersionEdit that deletes the excluded SST files.
            ve := versionEdit{
                DeletedFiles:         excludedFiles,
                RemovedBackingTables: removeBackingTables,
            }

            rw, err := w.Next()
            if err != nil {
                return err
            }
            if err := ve.Encode(rw); err != nil {
                return err
            }
        }
        if err := w.Close(); err != nil {
            return err
        }
        return dst.Sync()
    }(); err != nil {
        return err
    }

    var manifestMarker *atomicfs.Marker
    manifestMarker, _, err := atomicfs.LocateMarker(fs, destDirPath, manifestMarkerName)
    if err != nil {
        return err
    }
    if err := manifestMarker.Move(base.MakeFilename(fileTypeManifest, manifestFileNum)); err != nil {
        return err
    }
    return manifestMarker.Close()
}
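
// A minimal sketch of the atomicfs marker pattern used above (the marker name
// and value are hypothetical; LocateMarker also returns the value encoded in
// an existing marker's filename, if any):
//
//	marker, current, err := atomicfs.LocateMarker(fs, dir, "my-marker")
//	if err != nil {
//		return err
//	}
//	_ = current // previously recorded value, or "" if none
//	if err := marker.Move("new-value"); err != nil {
//		return err
//	}
//	return marker.Close()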