Line data Source code
1 : // Copyright 2011 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package pebble
6 :
7 : import (
8 : "bytes"
9 : "fmt"
10 : "io"
11 : "runtime"
12 : "strconv"
13 : "strings"
14 : "time"
15 : "unicode"
16 :
17 : "github.com/cockroachdb/errors"
18 : "github.com/cockroachdb/pebble/internal/base"
19 : "github.com/cockroachdb/pebble/internal/cache"
20 : "github.com/cockroachdb/pebble/internal/humanize"
21 : "github.com/cockroachdb/pebble/internal/keyspan"
22 : "github.com/cockroachdb/pebble/internal/manifest"
23 : "github.com/cockroachdb/pebble/objstorage/remote"
24 : "github.com/cockroachdb/pebble/rangekey"
25 : "github.com/cockroachdb/pebble/sstable"
26 : "github.com/cockroachdb/pebble/vfs"
27 : "github.com/cockroachdb/pebble/wal"
28 : )
29 :
30 : const (
31 : cacheDefaultSize = 8 << 20 // 8 MB
32 : defaultLevelMultiplier = 10
33 : )
34 :
35 : // Compression exports the sstable.Compression type.
36 : type Compression = sstable.Compression
37 :
38 : // Exported Compression constants.
39 : const (
40 : DefaultCompression = sstable.DefaultCompression
41 : NoCompression = sstable.NoCompression
42 : SnappyCompression = sstable.SnappyCompression
43 : ZstdCompression = sstable.ZstdCompression
44 : )
45 :
46 : // FilterType exports the base.FilterType type.
47 : type FilterType = base.FilterType
48 :
49 : // Exported TableFilter constants.
50 : const (
51 : TableFilter = base.TableFilter
52 : )
53 :
54 : // FilterWriter exports the base.FilterWriter type.
55 : type FilterWriter = base.FilterWriter
56 :
57 : // FilterPolicy exports the base.FilterPolicy type.
58 : type FilterPolicy = base.FilterPolicy
59 :
60 : // BlockPropertyCollector exports the sstable.BlockPropertyCollector type.
61 : type BlockPropertyCollector = sstable.BlockPropertyCollector
62 :
63 : // BlockPropertyFilter exports the base.BlockPropertyFilter type.
64 : type BlockPropertyFilter = base.BlockPropertyFilter
65 :
66 : // ShortAttributeExtractor exports the base.ShortAttributeExtractor type.
67 : type ShortAttributeExtractor = base.ShortAttributeExtractor
68 :
69 : // UserKeyPrefixBound exports the sstable.UserKeyPrefixBound type.
70 : type UserKeyPrefixBound = sstable.UserKeyPrefixBound
71 :
72 : // IterKeyType configures which types of keys an iterator should surface.
73 : type IterKeyType int8
74 :
75 : const (
76 : // IterKeyTypePointsOnly configures an iterator to iterate over point keys
77 : // only.
78 : IterKeyTypePointsOnly IterKeyType = iota
79 : // IterKeyTypeRangesOnly configures an iterator to iterate over range keys
80 : // only.
81 : IterKeyTypeRangesOnly
82 : // IterKeyTypePointsAndRanges configures an iterator to iterate over both
83 : // point keys and range keys simultaneously.
84 : IterKeyTypePointsAndRanges
85 : )
86 :
87 : // String implements fmt.Stringer.
88 1 : func (t IterKeyType) String() string {
89 1 : switch t {
90 1 : case IterKeyTypePointsOnly:
91 1 : return "points-only"
92 1 : case IterKeyTypeRangesOnly:
93 1 : return "ranges-only"
94 1 : case IterKeyTypePointsAndRanges:
95 1 : return "points-and-ranges"
96 0 : default:
97 0 : panic(fmt.Sprintf("unknown key type %d", t))
98 : }
99 : }
100 :
101 : // IterOptions hold the optional per-query parameters for NewIter.
102 : //
103 : // Like Options, a nil *IterOptions is valid and means to use the default
104 : // values.
105 : type IterOptions struct {
106 : // LowerBound specifies the smallest key (inclusive) that the iterator will
107 : // return during iteration. If the iterator is seeked or iterated past this
108 : // boundary the iterator will return Valid()==false. Setting LowerBound
109 : // effectively truncates the key space visible to the iterator.
110 : LowerBound []byte
111 : // UpperBound specifies the largest key (exclusive) that the iterator will
112 : // return during iteration. If the iterator is seeked or iterated past this
113 : // boundary the iterator will return Valid()==false. Setting UpperBound
114 : // effectively truncates the key space visible to the iterator.
115 : UpperBound []byte
116 : // TableFilter can be used to filter the tables that are scanned during
117 : // iteration based on the user properties. Return true to scan the table and
118 : // false to skip scanning. This function must be thread-safe since the same
119 : // function can be used by multiple iterators, if the iterator is cloned.
120 : TableFilter func(userProps map[string]string) bool
121 : // SkipPoint may be used to skip over point keys that don't match an
122 : // arbitrary predicate during iteration. If set, the Iterator invokes
123 : // SkipPoint for keys encountered. If SkipPoint returns true, the iterator
124 : // will skip the key without yielding it to the iterator operation in
125 : // progress.
126 : //
127 : // SkipPoint must be a pure function and always return the same result when
128 : // provided the same arguments. The iterator may call SkipPoint multiple
129 : // times for the same user key.
130 : SkipPoint func(userKey []byte) bool
131 : // PointKeyFilters can be used to avoid scanning tables and blocks in tables
132 : // when iterating over point keys. This slice represents an intersection
133 : // across all filters, i.e., all filters must indicate that the block is
134 : // relevant.
135 : //
136 : // Performance note: When len(PointKeyFilters) > 0, the caller should ensure
137 : // that cap(PointKeyFilters) is at least len(PointKeyFilters)+1. This helps
138 : // avoid allocations in Pebble internal code that mutates the slice.
139 : PointKeyFilters []BlockPropertyFilter
140 : // RangeKeyFilters can be used to avoid scanning tables and blocks in tables
141 : // when iterating over range keys. The same requirements that apply to
142 : // PointKeyFilters apply here too.
143 : RangeKeyFilters []BlockPropertyFilter
144 : // KeyTypes configures which types of keys to iterate over: point keys,
145 : // range keys, or both.
146 : KeyTypes IterKeyType
147 : // RangeKeyMasking can be used to enable automatic masking of point keys by
148 : // range keys. Range key masking is only supported during combined range key
149 : // and point key iteration mode (IterKeyTypePointsAndRanges).
150 : RangeKeyMasking RangeKeyMasking
151 :
152 : // OnlyReadGuaranteedDurable is an advanced option that is only supported by
153 : // the Reader implemented by DB. When set to true, only the guaranteed to be
154 : // durable state is visible in the iterator.
155 : // - This definition is made under the assumption that the FS implementation
156 : // is providing a durability guarantee when data is synced.
157 : // - The visible state represents a consistent point in the history of the
158 : // DB.
159 : // - The implementation is free to choose a conservative definition of what
160 : // is guaranteed durable. For simplicity, the current implementation
161 : // ignores memtables. A more sophisticated implementation could track the
162 : // highest seqnum that is synced to the WAL and published and use that as
163 : // the visible seqnum for an iterator. Note that the latter approach is
164 : // not strictly better than the former since we can have DBs that are (a)
165 : // synced more rarely than memtable flushes, (b) have no WAL. (a) is
166 : // likely to be true in a future CockroachDB context where the DB
167 : // containing the state machine may be rarely synced.
168 : // NB: this current implementation relies on the fact that memtables are
169 : // flushed in seqnum order, and any ingested sstables that happen to have a
170 : // lower seqnum than a non-flushed memtable don't have any overlapping keys.
171 : // This is the fundamental level invariant used in other code too, like when
172 : // merging iterators.
173 : //
174 : // Semantically, using this option provides the caller a "snapshot" as of
175 : // the time the most recent memtable was flushed. An alternate interface
176 : // would be to add a NewSnapshot variant. Creating a snapshot is heavier
177 : // weight than creating an iterator, so we have opted to support this
178 : // iterator option.
179 : OnlyReadGuaranteedDurable bool
180 : // UseL6Filters allows the caller to opt into reading filter blocks for L6
181 : // sstables. Helpful if a lot of SeekPrefixGEs are expected in quick
182 : // succession that are also unlikely to yield a single key. Filter blocks in
183 : // L6 can be relatively large, often larger than data blocks, so the benefit of
184 : // loading them in the cache is minimized if the probability of the key
185 : // existing is not low or if we just expect a one-time Seek (where loading the
186 : // data block directly is better).
187 : UseL6Filters bool
188 : // CategoryAndQoS is used for categorized iterator stats. This should not be
189 : // changed by calling SetOptions.
190 : sstable.CategoryAndQoS
191 :
192 : DebugRangeKeyStack bool
193 :
194 : // Internal options.
195 :
196 : logger Logger
197 : // Level corresponding to this file. Only passed in if constructed by a
198 : // levelIter.
199 : level manifest.Level
200 : // disableLazyCombinedIteration is an internal testing option.
201 : disableLazyCombinedIteration bool
202 : // snapshotForHideObsoletePoints is specified for/by levelIter when opening
203 : // files and is used to decide whether to hide obsolete points. A value of 0
204 : // implies obsolete points should not be hidden.
205 : snapshotForHideObsoletePoints uint64
206 :
207 : // NB: If adding new Options, you must account for them in iterator
208 : // construction and Iterator.SetOptions.
209 : }
210 :
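// For illustration, a minimal sketch of a combined point- and range-key scan
// with bounds; it assumes a *pebble.DB named db:
//
//	iter, err := db.NewIter(&pebble.IterOptions{
//		LowerBound: []byte("a"),
//		UpperBound: []byte("z"),
//		KeyTypes:   pebble.IterKeyTypePointsAndRanges,
//	})
//	if err != nil {
//		return err
//	}
//	defer iter.Close()
//	for valid := iter.First(); valid; valid = iter.Next() {
//		// Only keys in [LowerBound, UpperBound) are surfaced here.
//	}
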
211 : // GetLowerBound returns the LowerBound or nil if the receiver is nil.
212 1 : func (o *IterOptions) GetLowerBound() []byte {
213 1 : if o == nil {
214 1 : return nil
215 1 : }
216 1 : return o.LowerBound
217 : }
218 :
219 : // GetUpperBound returns the UpperBound or nil if the receiver is nil.
220 1 : func (o *IterOptions) GetUpperBound() []byte {
221 1 : if o == nil {
222 1 : return nil
223 1 : }
224 1 : return o.UpperBound
225 : }
226 :
227 1 : func (o *IterOptions) pointKeys() bool {
228 1 : if o == nil {
229 0 : return true
230 0 : }
231 1 : return o.KeyTypes == IterKeyTypePointsOnly || o.KeyTypes == IterKeyTypePointsAndRanges
232 : }
233 :
234 1 : func (o *IterOptions) rangeKeys() bool {
235 1 : if o == nil {
236 0 : return false
237 0 : }
238 1 : return o.KeyTypes == IterKeyTypeRangesOnly || o.KeyTypes == IterKeyTypePointsAndRanges
239 : }
240 :
241 1 : func (o *IterOptions) getLogger() Logger {
242 1 : if o == nil || o.logger == nil {
243 1 : return DefaultLogger
244 1 : }
245 1 : return o.logger
246 : }
247 :
248 : // SpanIterOptions creates a SpanIterOptions from this IterOptions.
249 1 : func (o *IterOptions) SpanIterOptions() keyspan.SpanIterOptions {
250 1 : if o == nil {
251 1 : return keyspan.SpanIterOptions{}
252 1 : }
253 1 : return keyspan.SpanIterOptions{
254 1 : RangeKeyFilters: o.RangeKeyFilters,
255 1 : }
256 : }
257 :
258 : // scanInternalOptions is similar to IterOptions, meant for use with
259 : // scanInternalIterator.
260 : type scanInternalOptions struct {
261 : sstable.CategoryAndQoS
262 : IterOptions
263 :
264 : visitPointKey func(key *InternalKey, value LazyValue, iterInfo IteratorLevel) error
265 : visitRangeDel func(start, end []byte, seqNum uint64) error
266 : visitRangeKey func(start, end []byte, keys []rangekey.Key) error
267 : visitSharedFile func(sst *SharedSSTMeta) error
268 : visitExternalFile func(sst *ExternalFile) error
269 :
270 : // includeObsoleteKeys specifies whether keys shadowed by newer internal keys
271 : // are exposed. If false, only one internal key per user key is exposed.
272 : includeObsoleteKeys bool
273 :
274 : // rateLimitFunc is used to limit the amount of bytes read per second.
275 : rateLimitFunc func(key *InternalKey, value LazyValue) error
276 : }
277 :
278 : // RangeKeyMasking configures automatic hiding of point keys by range keys. A
279 : // non-nil Suffix enables range-key masking. When enabled, range keys with
280 : // suffixes ≥ Suffix behave as masks. All point keys that are contained within a
281 : // masking range key's bounds and have suffixes greater than the range key's
282 : // suffix are automatically skipped.
283 : //
284 : // Specifically, when configured with a RangeKeyMasking.Suffix _s_, and there
285 : // exists a range key with suffix _r_ covering a point key with suffix _p_, and
286 : //
287 : // _s_ ≤ _r_ < _p_
288 : //
289 : // then the point key is elided.
290 : //
291 : // Range-key masking may only be used when iterating over both point keys and
292 : // range keys with IterKeyTypePointsAndRanges.
293 : type RangeKeyMasking struct {
294 : // Suffix configures which range keys may mask point keys. Only range keys
295 : // that are defined at suffixes greater than or equal to Suffix will mask
296 : // point keys.
297 : Suffix []byte
298 : // Filter is an optional field that may be used to improve performance of
299 : // range-key masking through a block-property filter defined over key
300 : // suffixes. If non-nil, Filter is called by Pebble to construct a
301 : // block-property filter mask at iterator creation. The filter is used to
302 : // skip whole point-key blocks containing point keys with suffixes greater
303 : // than a covering range-key's suffix.
304 : //
305 : // To use this functionality, the caller must create and configure (through
306 : // Options.BlockPropertyCollectors) a block-property collector that records
307 : // the maximum suffix contained within a block. The caller then must write
308 : // and provide a BlockPropertyFilterMask implementation on that same
309 : // property. See the BlockPropertyFilterMask type for more information.
310 : Filter func() BlockPropertyFilterMask
311 : }
312 :
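// A minimal sketch of enabling range-key masking at iterator creation,
// assuming a *pebble.DB named db and a hypothetical encodeTimestamp helper
// that produces MVCC-style suffixes:
//
//	iter, err := db.NewIter(&pebble.IterOptions{
//		KeyTypes: pebble.IterKeyTypePointsAndRanges,
//		RangeKeyMasking: pebble.RangeKeyMasking{
//			// Range keys suffixed at or above this suffix act as masks,
//			// hiding point keys within their bounds that carry greater
//			// suffixes.
//			Suffix: encodeTimestamp(readTimestamp), // hypothetical helper
//		},
//	})
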
313 : // BlockPropertyFilterMask extends the BlockPropertyFilter interface for use
314 : // with range-key masking. Unlike an ordinary block property filter, a
315 : // BlockPropertyFilterMask's filtering criteria is allowed to change when Pebble
316 : // invokes its SetSuffix method.
317 : //
318 : // When a Pebble iterator steps into a range key's bounds and the range key has
319 : // a suffix greater than or equal to RangeKeyMasking.Suffix, the range key acts
320 : // as a mask. The masking range key hides all point keys that fall within the
321 : // range key's bounds and have suffixes > the range key's suffix. Without a
322 : // filter mask configured, Pebble performs this hiding by stepping through point
323 : // keys and comparing suffixes. If large numbers of point keys are masked, this
324 : // requires Pebble to load, iterate through and discard a large number of
325 : // sstable blocks containing masked point keys.
326 : //
327 : // If a block-property collector and a filter mask are configured, Pebble may
328 : // skip loading some point-key blocks altogether. If a block's keys are known to
329 : // all fall within the bounds of the masking range key and the block was
330 : // annotated by a block-property collector with the maximal suffix, Pebble can
331 : // ask the filter mask to compare the property to the current masking range
332 : // key's suffix. If the mask reports no intersection, the block may be skipped.
333 : //
334 : // If unsuffixed and suffixed keys are written to the database, care must be
335 : // taken to avoid unintentionally masking un-suffixed keys located in the same
336 : // block as suffixed keys. One solution is to interpret unsuffixed keys as
337 : // containing the maximal suffix value, ensuring that blocks containing
338 : // unsuffixed keys are always loaded.
339 : type BlockPropertyFilterMask interface {
340 : BlockPropertyFilter
341 :
342 : // SetSuffix configures the mask with the suffix of a range key. The filter
343 : // should return false from Intersects whenever it's provided with a
344 : // property encoding a block's minimum suffix that's greater (according to
345 : // Compare) than the provided suffix.
346 : SetSuffix(suffix []byte) error
347 : }
348 :
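// A minimal sketch of a mask implementation, assuming a version of the
// BlockPropertyFilter interface that exposes just Name and Intersects (any
// additional interface methods would need corresponding stubs). It follows
// the SetSuffix contract above: Intersects reports false when the block's
// encoded suffix property is greater than the current mask suffix.
//
//	type suffixMask struct {
//		cmp    base.Compare
//		suffix []byte
//	}
//
//	// Name must match the name of the paired block-property collector.
//	func (m *suffixMask) Name() string { return "suffix-mask" }
//
//	func (m *suffixMask) Intersects(prop []byte) (bool, error) {
//		// prop is assumed to hold the suffix recorded for the block by
//		// the paired collector.
//		return m.cmp(prop, m.suffix) <= 0, nil
//	}
//
//	func (m *suffixMask) SetSuffix(suffix []byte) error {
//		m.suffix = append(m.suffix[:0], suffix...)
//		return nil
//	}
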
349 : // WriteOptions hold the optional per-query parameters for Set and Delete
350 : // operations.
351 : //
352 : // Like Options, a nil *WriteOptions is valid and means to use the default
353 : // values.
354 : type WriteOptions struct {
355 : // Sync is whether to sync writes through the OS buffer cache and down onto
356 : // the actual disk, if applicable. Setting Sync is required for durability of
357 : // individual write operations but can result in slower writes.
358 : //
359 : // If false, and the process or machine crashes, then a recent write may be
360 : // lost. This is due to the recently written data being buffered inside the
361 : // process running Pebble. This differs from the semantics of a write system
362 : // call in which the data is buffered in the OS buffer cache and would thus
363 : // survive a process crash.
364 : //
365 : // The default value is true.
366 : Sync bool
367 : }
368 :
369 : // Sync specifies the default write options for writes which synchronize to
370 : // disk.
371 : var Sync = &WriteOptions{Sync: true}
372 :
373 : // NoSync specifies the default write options for writes which do not
374 : // synchronize to disk.
375 : var NoSync = &WriteOptions{Sync: false}
376 :
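// For example, a write that must survive a crash uses Sync, while a write
// whose loss is tolerable can use NoSync (db is an assumed *pebble.DB):
//
//	if err := db.Set([]byte("k"), []byte("v"), pebble.Sync); err != nil {
//		return err
//	}
//	_ = db.Set([]byte("scratch"), []byte("v"), pebble.NoSync)
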
377 : // GetSync returns the Sync value or true if the receiver is nil.
378 1 : func (o *WriteOptions) GetSync() bool {
379 1 : return o == nil || o.Sync
380 1 : }
381 :
382 : // LevelOptions holds the optional per-level parameters.
383 : type LevelOptions struct {
384 : // BlockRestartInterval is the number of keys between restart points
385 : // for delta encoding of keys.
386 : //
387 : // The default value is 16.
388 : BlockRestartInterval int
389 :
390 : // BlockSize is the target uncompressed size in bytes of each table block.
391 : //
392 : // The default value is 4096.
393 : BlockSize int
394 :
395 : // BlockSizeThreshold finishes a block if the block size is larger than the
396 : // specified percentage of the target block size and adding the next entry
397 : // would cause the block to be larger than the target block size.
398 : //
399 : // The default value is 90.
400 : BlockSizeThreshold int
401 :
402 : // Compression defines the per-block compression to use.
403 : //
404 : // The default value (DefaultCompression) uses snappy compression.
405 : Compression func() Compression
406 :
407 : // FilterPolicy defines a filter algorithm (such as a Bloom filter) that can
408 : // reduce disk reads for Get calls.
409 : //
410 : // One such implementation is bloom.FilterPolicy(10) from the pebble/bloom
411 : // package.
412 : //
413 : // The default value means to use no filter.
414 : FilterPolicy FilterPolicy
415 :
416 : // FilterType defines whether an existing filter policy is applied at a
417 : // block-level or table-level. Block-level filters use less memory to create,
418 : // but are slower to access as a check for the key in the index must first be
419 : // performed to locate the filter block. A table-level filter will require
420 : // memory proportional to the number of keys in an sstable to create, but
421 : // avoids the index lookup when determining if a key is present. Table-level
422 : // filters should be preferred except under constrained memory situations.
423 : FilterType FilterType
424 :
425 : // IndexBlockSize is the target uncompressed size in bytes of each index
426 : // block. When the index block size is larger than this target, two-level
427 : // indexes are automatically enabled. Setting this option to a large value
428 : // (such as math.MaxInt32) disables the automatic creation of two-level
429 : // indexes.
430 : //
431 : // The default value is the value of BlockSize.
432 : IndexBlockSize int
433 :
434 : // The target file size for the level.
435 : TargetFileSize int64
436 : }
437 :
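// A minimal sketch of per-level configuration with a table-level bloom
// filter, using bloom.FilterPolicy(10) from the pebble/bloom package as
// suggested by the FilterPolicy comment above:
//
//	opts := &pebble.Options{
//		Levels: []pebble.LevelOptions{{
//			BlockSize:    32 << 10, // 32 KB blocks rather than the 4 KB default
//			FilterPolicy: bloom.FilterPolicy(10),
//			FilterType:   pebble.TableFilter,
//		}},
//	}
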
438 : // EnsureDefaults ensures that the default values for all of the options have
439 : // been initialized. It is valid to call EnsureDefaults on a nil receiver. A
440 : // non-nil result will always be returned.
441 1 : func (o *LevelOptions) EnsureDefaults() *LevelOptions {
442 1 : if o == nil {
443 0 : o = &LevelOptions{}
444 0 : }
445 1 : if o.BlockRestartInterval <= 0 {
446 1 : o.BlockRestartInterval = base.DefaultBlockRestartInterval
447 1 : }
448 1 : if o.BlockSize <= 0 {
449 1 : o.BlockSize = base.DefaultBlockSize
450 1 : } else if o.BlockSize > sstable.MaximumBlockSize {
451 0 : panic(errors.Errorf("BlockSize %d exceeds MaximumBlockSize", o.BlockSize))
452 : }
453 1 : if o.BlockSizeThreshold <= 0 {
454 1 : o.BlockSizeThreshold = base.DefaultBlockSizeThreshold
455 1 : }
456 1 : if o.Compression == nil {
457 1 : o.Compression = func() Compression { return DefaultCompression }
458 : }
459 1 : if o.IndexBlockSize <= 0 {
460 1 : o.IndexBlockSize = o.BlockSize
461 1 : }
462 1 : if o.TargetFileSize <= 0 {
463 1 : o.TargetFileSize = 2 << 20 // 2 MB
464 1 : }
465 1 : return o
466 : }
467 :
468 : // Options holds the optional parameters for configuring pebble. These options
469 : // apply to the DB at large; per-query options are defined by the IterOptions
470 : // and WriteOptions types.
471 : type Options struct {
472 : // Sync sstables periodically in order to smooth out writes to disk. This
473 : // option does not provide any persistency guarantee, but is used to avoid
474 : // latency spikes if the OS automatically decides to write out a large chunk
475 : // of dirty filesystem buffers. This option only controls SSTable syncs; WAL
476 : // syncs are controlled by WALBytesPerSync.
477 : //
478 : // The default value is 512KB.
479 : BytesPerSync int
480 :
481 : // Cache is used to cache uncompressed blocks from sstables.
482 : //
483 : // The default cache size is 8 MB.
484 : Cache *cache.Cache
485 :
486 : // Cleaner cleans obsolete files.
487 : //
488 : // The default cleaner uses the DeleteCleaner.
489 : Cleaner Cleaner
490 :
491 : // Comparer defines a total ordering over the space of []byte keys: a 'less
492 : // than' relationship. The same comparison algorithm must be used for reads
493 : // and writes over the lifetime of the DB.
494 : //
495 : // The default value uses the same ordering as bytes.Compare.
496 : Comparer *Comparer
497 :
498 : // DebugCheck is invoked, if non-nil, whenever a new version is being
499 : // installed. Typically, this is set to pebble.DebugCheckLevels in tests
500 : // or tools only, to check invariants over all the data in the database.
501 : DebugCheck func(*DB) error
502 :
503 : // Disable the write-ahead log (WAL). Disabling the write-ahead log prohibits
504 : // crash recovery, but can improve performance if crash recovery is not
505 : // needed (e.g. when only temporary state is being stored in the database).
506 : //
507 : // TODO(peter): untested
508 : DisableWAL bool
509 :
510 : // ErrorIfExists causes an error on Open if the database already exists.
511 : // The error can be checked with errors.Is(err, ErrDBAlreadyExists).
512 : //
513 : // The default value is false.
514 : ErrorIfExists bool
515 :
516 : // ErrorIfNotExists causes an error on Open if the database does not already
517 : // exist. The error can be checked with errors.Is(err, ErrDBDoesNotExist).
518 : //
519 : // The default value is false which will cause a database to be created if it
520 : // does not already exist.
521 : ErrorIfNotExists bool
522 :
523 : // ErrorIfNotPristine causes an error on Open if the database already exists
524 : // and any operations have been performed on the database. The error can be
525 : // checked with errors.Is(err, ErrDBNotPristine).
526 : //
527 : // Note that a database that contained keys that were all subsequently deleted
528 : // may or may not trigger the error. Currently, we check if there are any live
529 : // SSTs or log records to replay.
530 : ErrorIfNotPristine bool
531 :
532 : // EventListener provides hooks to listening to significant DB events such as
533 : // flushes, compactions, and table deletion.
534 : EventListener *EventListener
535 :
536 : // Experimental contains experimental options which are off by default.
537 : // These options are temporary and will eventually either be deleted, moved
538 : // out of the experimental group, or made the non-adjustable default. These
539 : // options may change at any time, so do not rely on them.
540 : Experimental struct {
541 : // The threshold of L0 read-amplification at which compaction concurrency
542 : // is enabled (if CompactionDebtConcurrency was not already exceeded).
543 : // Every multiple of this value enables another concurrent
544 : // compaction up to MaxConcurrentCompactions.
545 : L0CompactionConcurrency int
546 :
547 : // CompactionDebtConcurrency controls the threshold of compaction debt
548 : // at which additional compaction concurrency slots are added. For every
549 : // multiple of this value in compaction debt bytes, an additional
550 : // concurrent compaction is added. This works "on top" of
551 : // L0CompactionConcurrency, so the higher of the count of compaction
552 : // concurrency slots as determined by the two options is chosen.
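//
// For example, with the defaults (L0CompactionConcurrency = 10,
// CompactionDebtConcurrency = 1 GB), an L0 read-amplification of 40
// allows roughly 40/10 = 4 concurrent compactions while 3 GB of
// compaction debt allows roughly 3; the higher count wins, capped by
// MaxConcurrentCompactions.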
553 : CompactionDebtConcurrency uint64
554 :
555 : // IngestSplit, if it returns true, allows for ingest-time splitting of
556 : // existing sstables into two virtual sstables to allow ingestion sstables to
557 : // slot into a lower level than they otherwise would have.
558 : IngestSplit func() bool
559 :
560 : // ReadCompactionRate controls the frequency of read triggered
561 : // compactions by adjusting `AllowedSeeks` in manifest.FileMetadata:
562 : //
563 : // AllowedSeeks = FileSize / ReadCompactionRate
564 : //
565 : // From LevelDB:
566 : // ```
567 : // We arrange to automatically compact this file after
568 : // a certain number of seeks. Let's assume:
569 : // (1) One seek costs 10ms
570 : // (2) Writing or reading 1MB costs 10ms (100MB/s)
571 : // (3) A compaction of 1MB does 25MB of IO:
572 : // 1MB read from this level
573 : // 10-12MB read from next level (boundaries may be misaligned)
574 : // 10-12MB written to next level
575 : // This implies that 25 seeks cost the same as the compaction
576 : // of 1MB of data. I.e., one seek costs approximately the
577 : // same as the compaction of 40KB of data. We are a little
578 : // conservative and allow approximately one seek for every 16KB
579 : // of data before triggering a compaction.
580 : // ```
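//
// For example, at the default ReadCompactionRate of 16000 (one seek
// allowed per ~16KB), a 2MB file is assigned
// AllowedSeeks = (2 << 20) / 16000 = 131.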
581 : ReadCompactionRate int64
582 :
583 : // ReadSamplingMultiplier is a multiplier for the readSamplingPeriod in
584 : // iterator.maybeSampleRead() to control the frequency of read sampling
585 : // to trigger a read triggered compaction. A value of -1 prevents sampling
586 : // and disables read triggered compactions. The default is 1 << 4, which
587 : // is multiplied by a constant of 1 << 16 to yield 1 << 20 (1MB).
588 : ReadSamplingMultiplier int64
589 :
590 : // TableCacheShards is the number of shards per table cache.
591 : // Reducing the value can reduce the number of idle goroutines per DB
592 : // instance which can be useful in scenarios with a lot of DB instances
593 : // and a large number of CPUs, but doing so can lead to higher contention
594 : // in the table cache and reduced performance.
595 : //
596 : // The default value is the number of logical CPUs, which can be
597 : // limited by runtime.GOMAXPROCS.
598 : TableCacheShards int
599 :
600 : // KeyValidationFunc is a function to validate a user key in an SSTable.
601 : //
602 : // Currently, this function is used to validate the smallest and largest
603 : // keys in an SSTable undergoing compaction. In this case, returning an
604 : // error from the validation function will result in a panic at runtime,
605 : // given that there is rarely any way of recovering from malformed keys
606 : // present in compacted files. By default, validation is not performed.
607 : //
608 : // Additional use-cases may be added in the future.
609 : //
610 : // NOTE: callers should take care to not mutate the key being validated.
611 : KeyValidationFunc func(userKey []byte) error
612 :
613 : // ValidateOnIngest schedules validation of sstables after they have
614 : // been ingested.
615 : //
616 : // By default, this value is false.
617 : ValidateOnIngest bool
618 :
619 : // LevelMultiplier configures the size multiplier used to determine the
620 : // desired size of each level of the LSM. Defaults to 10.
621 : LevelMultiplier int
622 :
623 : // MultiLevelCompactionHeuristic determines whether to add an additional
624 : // level to a conventional two level compaction. If nil, a multilevel
625 : // compaction will never get triggered.
626 : MultiLevelCompactionHeuristic MultiLevelHeuristic
627 :
628 : // MaxWriterConcurrency is used to indicate the maximum number of
629 : // compression workers the compression queue is allowed to use. If
630 : // MaxWriterConcurrency > 0, then the Writer will use parallelism to
631 : // compress and write blocks to disk. Otherwise, the writer will
632 : // compress and write blocks to disk synchronously.
633 : MaxWriterConcurrency int
634 :
635 : // ForceWriterParallelism is used to force parallelism in the sstable
636 : // Writer for the metamorphic tests. Even with the MaxWriterConcurrency
637 : // option set, we only enable parallelism in the sstable Writer if there
638 : // is enough CPU available, and this option bypasses that.
639 : ForceWriterParallelism bool
640 :
641 : // CPUWorkPermissionGranter should be set if Pebble should be given the
642 : // ability to optionally schedule additional CPU. See the documentation
643 : // for CPUWorkPermissionGranter for more details.
644 : CPUWorkPermissionGranter CPUWorkPermissionGranter
645 :
646 : // EnableValueBlocks is used to decide whether to enable writing
647 : // TableFormatPebblev3 sstables. This setting is only respected by a
648 : // specific subset of format major versions: FormatSSTableValueBlocks,
649 : // FormatFlushableIngest and FormatPrePebblev1MarkedCompacted. In lower
650 : // format major versions, value blocks are never enabled. In higher
651 : // format major versions, value blocks are always enabled.
652 : EnableValueBlocks func() bool
653 :
654 : // ShortAttributeExtractor is used iff EnableValueBlocks() returns true
655 : // (else ignored). If non-nil, a ShortAttribute can be extracted from the
656 : // value and stored with the key, when the value is stored elsewhere.
657 : ShortAttributeExtractor ShortAttributeExtractor
658 :
659 : // RequiredInPlaceValueBound specifies an optional span of user key
660 : // prefixes that are not-MVCC, but have a suffix. For these the values
661 : // must be stored with the key, since the concept of "older versions" is
662 : // not defined. It is also useful for statically known exclusions to value
663 : // separation. In CockroachDB, this will be used for the lock table key
664 : // space that has non-empty suffixes, but those locks don't represent
665 : // actual MVCC versions (the suffix ordering is arbitrary). We will also
666 : // need to add support for dynamically configured exclusions (we want the
667 : // default to be to allow Pebble to decide whether to separate the value
668 : // or not, hence this is structured as exclusions), for example, for users
669 : // of CockroachDB to dynamically exclude certain tables.
670 : //
671 : // Any change in exclusion behavior takes effect only on future written
672 : // sstables, and does not start rewriting existing sstables.
673 : //
674 : // Even ignoring changes in this setting, exclusions are interpreted as a
675 : // guidance by Pebble, and not necessarily honored. Specifically, user
676 : // keys with multiple Pebble-versions *may* have the older versions stored
677 : // in value blocks.
678 : RequiredInPlaceValueBound UserKeyPrefixBound
679 :
680 : // DisableIngestAsFlushable disables lazy ingestion of sstables through
681 : // a WAL write and memtable rotation. Only effectual if the format
682 : // major version is at least `FormatFlushableIngest`.
683 : DisableIngestAsFlushable func() bool
684 :
685 : // RemoteStorage enables use of remote storage (e.g. S3) for storing
686 : // sstables. Setting this option enables use of CreateOnShared option and
687 : // allows ingestion of external files.
688 : RemoteStorage remote.StorageFactory
689 :
690 : // If CreateOnShared is non-zero, new sstables are created on remote storage
691 : // (using CreateOnSharedLocator and with the appropriate
692 : // CreateOnSharedStrategy). These sstables can be shared between different
693 : // Pebble instances; the lifecycle of such objects is managed by the
694 : // remote.Storage constructed by options.RemoteStorage.
695 : //
696 : // Can only be used when RemoteStorage is set (and recognizes
697 : // CreateOnSharedLocator).
698 : CreateOnShared remote.CreateOnSharedStrategy
699 : CreateOnSharedLocator remote.Locator
700 :
701 : // SecondaryCacheSizeBytes is the size of the on-disk block cache for
702 : // objects on shared storage, in bytes. If it is 0, no cache is used.
703 : SecondaryCacheSizeBytes int64
704 :
705 : // NB: DO NOT crash on SingleDeleteInvariantViolationCallback or
706 : // IneffectualSingleDeleteCallback, since these can be false positives
707 : // even if SingleDel has been used correctly.
708 : //
709 : // Pebble's delete-only compactions can cause a recent RANGEDEL to peek
710 : // below an older SINGLEDEL and delete an arbitrary subset of data below
711 : // that SINGLEDEL. When that SINGLEDEL gets compacted (without the
712 : // RANGEDEL), any of these callbacks can happen, without it being a real
713 : // correctness problem.
714 : //
715 : // Example 1:
716 : // RANGEDEL [a, c)#10 in L0
717 : // SINGLEDEL b#5 in L1
718 : // SET b#3 in L6
719 : //
720 : // If the L6 file containing the SET is narrow and the L1 file containing
721 : // the SINGLEDEL is wide, a delete-only compaction can remove the file in
722 : // L6 before the SINGLEDEL is compacted down. Then when the SINGLEDEL is
723 : // compacted down, it will not find any SET to delete, resulting in the
724 : // ineffectual callback.
725 : //
726 : // Example 2:
727 : // RANGEDEL [a, z)#60 in L0
728 : // SINGLEDEL g#50 in L1
729 : // SET g#40 in L2
730 : // RANGEDEL [g,h)#30 in L3
731 : // SET g#20 in L6
732 : //
733 : // In this example, the two SETs represent the same user write, and the
734 : // RANGEDELs are caused by the CockroachDB range being dropped. That is,
735 : // the user wrote to g once, range was dropped, then added back, which
736 : // caused the SET again, then at some point g was validly deleted using a
737 : // SINGLEDEL, and then the range was dropped again. The older RANGEDEL can
738 : // get fragmented due to compactions it has been part of. Say this L3 file
739 : // containing the RANGEDEL is very narrow, while the L1, L2, L6 files are
740 : // wider than the RANGEDEL in L0. Then the RANGEDEL in L3 can be dropped
741 : // using a delete-only compaction, resulting in an LSM with state:
742 : //
743 : // RANGEDEL [a, z)#60 in L0
744 : // SINGLEDEL g#50 in L1
745 : // SET g#40 in L2
746 : // SET g#20 in L6
747 : //
748 : // A multi-level compaction involving L1, L2, L6 will cause the invariant
749 : // violation callback. This example doesn't need multi-level compactions:
750 : // say there was a Pebble snapshot at g#21 preventing g#20 from being
751 : // dropped when it meets g#40 in a compaction. That snapshot will not save
752 : // RANGEDEL [g,h)#30, so we can have:
753 : //
754 : // SINGLEDEL g#50 in L1
755 : // SET g#40, SET g#20 in L6
756 : //
757 : // And say the snapshot is removed and then the L1 and L6 compaction
758 : // happens, resulting in the invariant violation callback.
759 : //
760 : // TODO(sumeer): rename SingleDeleteInvariantViolationCallback to remove
761 : // the word "invariant".
762 :
763 : // IneffectualSingleDeleteCallback is called in compactions/flushes if any
764 : // single delete is being elided without deleting a point set/merge.
765 : IneffectualSingleDeleteCallback func(userKey []byte)
766 :
767 : // SingleDeleteInvariantViolationCallback is called in compactions/flushes if any
768 : // single delete has consumed a Set/Merge, and there is another immediately older
769 : // Set/SetWithDelete/Merge. The user of Pebble has violated the invariant under
770 : // which SingleDelete can be used correctly.
771 : //
772 : // Consider the sequence SingleDelete#3, Set#2, Set#1. There are three
773 : // ways some of these keys can first meet in a compaction.
774 : //
775 : // - All 3 keys in the same compaction: this callback will detect the
776 : // violation.
777 : //
778 : // - SingleDelete#3, Set#2 meet in a compaction first: Both keys will
779 : // disappear. The violation will not be detected, and the DB will have
780 : // Set#1 which is likely incorrect (from the user's perspective).
781 : //
782 : // - Set#2, Set#1 meet in a compaction first: The output will be Set#2,
783 : // which will later be consumed by SingleDelete#3. The violation will
784 : // not be detected and the DB will be correct.
785 : SingleDeleteInvariantViolationCallback func(userKey []byte)
786 : }
787 :
788 : // Filters is a map from filter policy name to filter policy. It is used for
789 : // debugging tools which may be used on multiple databases configured with
790 : // different filter policies. It is not necessary to populate this filters
791 : // map during normal usage of a DB.
792 : Filters map[string]FilterPolicy
793 :
794 : // FlushDelayDeleteRange configures how long the database should wait before
795 : // forcing a flush of a memtable that contains a range deletion. Disk space
796 : // cannot be reclaimed until the range deletion is flushed. No automatic
797 : // flush occurs if zero.
798 : FlushDelayDeleteRange time.Duration
799 :
800 : // FlushDelayRangeKey configures how long the database should wait before
801 : // forcing a flush of a memtable that contains a range key. Range keys in
802 : // the memtable prevent lazy combined iteration, so it's desirable to flush
803 : // range keys promptly. No automatic flush occurs if zero.
804 : FlushDelayRangeKey time.Duration
805 :
806 : // FlushSplitBytes denotes the target number of bytes per sublevel in
807 : // each flush split interval (i.e. range between two flush split keys)
808 : // in L0 sstables. When set to zero, only a single sstable is generated
809 : // by each flush. When set to a non-zero value, flushes are split at
810 : // points to meet L0's TargetFileSize, any grandparent-related overlap
811 : // options, and at boundary keys of L0 flush split intervals (which are
812 : // targeted to contain around FlushSplitBytes bytes in each sublevel
813 : // between pairs of boundary keys). Splitting sstables during flush
814 : // allows increased compaction flexibility and concurrency when those
815 : // tables are compacted to lower levels.
816 : FlushSplitBytes int64
817 :
818 : // FormatMajorVersion sets the format of on-disk files. It is
819 : // recommended to set the format major version to an explicit
820 : // version, as the default may change over time.
821 : //
822 : // At Open if the existing database is formatted using a later
823 : // format major version that is known to this version of Pebble,
824 : // Pebble will continue to use the later format major version. If
825 : // the existing database's version is unknown, the caller may use
826 : // FormatMostCompatible and will be able to open the database
827 : // regardless of its actual version.
828 : //
829 : // If the existing database is formatted using a format major
830 : // version earlier than the one specified, Open will automatically
831 : // ratchet the database to the specified format major version.
832 : FormatMajorVersion FormatMajorVersion
833 :
834 : // FS provides the interface for persistent file storage.
835 : //
836 : // The default value uses the underlying operating system's file system.
837 : FS vfs.FS
838 :
839 : // Lock, if set, must be a database lock acquired through LockDirectory for
840 : // the same directory passed to Open. If provided, Open will skip locking
841 : // the directory. Closing the database will not release the lock, and it's
842 : // the responsibility of the caller to release the lock after closing the
843 : // database.
844 : //
845 : // Open will enforce that the Lock passed locks the same directory passed to
846 : // Open. Concurrent calls to Open using the same Lock are detected and
847 : // prohibited.
848 : Lock *Lock
849 :
850 : // The count of L0 files necessary to trigger an L0 compaction.
851 : L0CompactionFileThreshold int
852 :
853 : // The amount of L0 read-amplification necessary to trigger an L0 compaction.
854 : L0CompactionThreshold int
855 :
856 : // Hard limit on L0 read-amplification, computed as the number of L0
857 : // sublevels. Writes are stopped when this threshold is reached.
858 : L0StopWritesThreshold int
859 :
860 : // The maximum number of bytes for LBase. The base level is the level which
861 : // L0 is compacted into. The base level is determined dynamically based on
862 : // the existing data in the LSM. The maximum number of bytes for other levels
863 : // is computed dynamically based on the base level's maximum size. When the
864 : // maximum number of bytes for a level is exceeded, compaction is requested.
865 : LBaseMaxBytes int64
866 :
867 : // Per-level options. Options for at least one level must be specified. The
868 : // options for the last level are used for all subsequent levels.
869 : Levels []LevelOptions
870 :
871 : // LoggerAndTracer will be used, if non-nil, else Logger will be used and
872 : // tracing will be a noop.
873 :
874 : // Logger used to write log messages.
875 : //
876 : // The default logger uses the Go standard library log package.
877 : Logger Logger
878 : // LoggerAndTracer is used for writing log messages and traces.
879 : LoggerAndTracer LoggerAndTracer
880 :
881 : // MaxManifestFileSize is the maximum size the MANIFEST file is allowed to
882 : // become. When the MANIFEST exceeds this size it is rolled over and a new
883 : // MANIFEST is created.
884 : MaxManifestFileSize int64
885 :
886 : // MaxOpenFiles is a soft limit on the number of open files that can be
887 : // used by the DB.
888 : //
889 : // The default value is 1000.
890 : MaxOpenFiles int
891 :
892 : // The size of a MemTable in steady state. The actual MemTable size starts at
893 : // min(256KB, MemTableSize) and doubles for each subsequent MemTable up to
894 : // MemTableSize. This reduces the memory pressure caused by MemTables for
895 : // short lived (test) DB instances. Note that more than one MemTable can be
896 : // in existence since flushing a MemTable involves creating a new one and
897 : // writing the contents of the old one in the
898 : // background. MemTableStopWritesThreshold places a hard limit on the size of
899 : // the queued MemTables.
900 : //
901 : // The default value is 4MB.
902 : MemTableSize uint64
903 :
904 : // Hard limit on the number of queued MemTables. Writes are stopped when
905 : // the sum of the queued memtable sizes exceeds:
906 : // MemTableStopWritesThreshold * MemTableSize.
907 : //
908 : // This value should be at least 2 or writes will stop whenever a MemTable is
909 : // being flushed.
910 : //
911 : // The default value is 2.
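//
// For example, with MemTableSize = 64MB and
// MemTableStopWritesThreshold = 4, writes stall once the queued
// memtables reach 4 * 64MB = 256MB.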
912 : MemTableStopWritesThreshold int
913 :
914 : // Merger defines the associative merge operation to use for merging values
915 : // written with {Batch,DB}.Merge.
916 : //
917 : // The default merger concatenates values.
918 : Merger *Merger
919 :
920 : // MaxConcurrentCompactions specifies the maximum number of concurrent
921 : // compactions (not including download compactions).
922 : //
923 : // Concurrent compactions are performed:
924 : // - when L0 read-amplification passes the L0CompactionConcurrency threshold;
925 : // - for automatic background compactions;
926 : // - when a manual compaction for a level is split and parallelized.
927 : //
928 : // MaxConcurrentCompactions() must be greater than 0.
929 : //
930 : // The default value is 1.
931 : MaxConcurrentCompactions func() int
932 :
933 : // MaxConcurrentDownloads specifies the maximum number of download
934 : // compactions. These are compactions that copy an external file to the local
935 : // store.
936 : //
937 : // This limit is independent of MaxConcurrentCompactions; at any point in
938 : // time, we may be running MaxConcurrentCompactions non-download compactions
939 : // and MaxConcurrentDownloads download compactions.
940 : //
941 : // MaxConcurrentDownloads() must be greater than 0.
942 : //
943 : // The default value is 1.
944 : MaxConcurrentDownloads func() int
945 :
946 : // DisableAutomaticCompactions dictates whether automatic compactions are
947 : // scheduled or not. The default is false (enabled). This option is only used
948 : // externally when running a manual compaction, and internally for tests.
949 : DisableAutomaticCompactions bool
950 :
951 : // DisableTableStats dictates whether tables should be loaded asynchronously
952 : // to compute statistics that inform compaction heuristics. The collection
953 : // of table stats improves compaction of tombstones, reclaiming disk space
954 : // more quickly and in some cases reducing write amplification in the
955 : // presence of tombstones. Disabling table stats may be useful in tests
956 : // that require determinism as the asynchronicity of table stats collection
957 : // introduces significant nondeterminism.
958 : DisableTableStats bool
959 :
960 : // NoSyncOnClose decides whether the Pebble instance will enforce a
961 : // close-time synchronization (e.g., fdatasync() or sync_file_range())
962 : // on files it writes to. Setting this to true removes the guarantee for a
963 : // sync on close. Some implementations can still issue a non-blocking sync.
964 : NoSyncOnClose bool
965 :
966 : // NumPrevManifest is the number of non-current or older manifests which
967 : // we want to keep around for debugging purposes. By default, we're going
968 : // to keep one older manifest.
969 : NumPrevManifest int
970 :
971 : // ReadOnly indicates that the DB should be opened in read-only mode. Writes
972 : // to the DB will return an error, background compactions are disabled, and
973 : // the flush that normally occurs after replaying the WAL at startup is
974 : // disabled.
975 : ReadOnly bool
976 :
977 : // TableCache is an initialized TableCache which should be set as an
978 : // option if the DB needs to be initialized with a pre-existing table cache.
979 : // If TableCache is nil, then a table cache which is unique to the DB instance
980 : // is created. TableCache can be shared between db instances by setting it here.
981 : // The TableCache set here must use the same underlying cache as Options.Cache
982 : // and pebble will panic otherwise.
983 : TableCache *TableCache
984 :
985 : // BlockPropertyCollectors is a list of BlockPropertyCollector creation
986 : // functions. A new BlockPropertyCollector is created for each sstable
987 : // built and lives for the lifetime of writing that table.
988 : BlockPropertyCollectors []func() BlockPropertyCollector
989 :
990 : // WALBytesPerSync sets the number of bytes to write to a WAL before calling
991 : // Sync on it in the background. Just like with BytesPerSync above, this
992 : // helps smooth out disk write latencies, and avoids cases where the OS
993 : // writes a lot of buffered data to disk at once. However, this is less
994 : // necessary with WALs, as many write operations already pass in
995 : // Sync = true.
996 : //
997 : // The default value is 0, i.e. no background syncing. This matches the
998 : // default behaviour in RocksDB.
999 : WALBytesPerSync int
1000 :
1001 : // WALDir specifies the directory to store write-ahead logs (WALs) in. If
1002 : // empty (the default), WALs will be stored in the same directory as sstables
1003 : // (i.e. the directory passed to pebble.Open).
1004 : WALDir string
1005 :
1006 : // WALFailover may be set to configure Pebble to monitor writes to its
1007 : // write-ahead log and failover to writing write-ahead log entries to a
1008 : // secondary location (eg, a separate physical disk). WALFailover may be
1009 : // used to improve write availability in the presence of transient disk
1010 : // unavailability.
1011 : WALFailover *WALFailoverOptions
1012 :
1013 : // WALRecoveryDirs is a list of additional directories that should be
1014 : // scanned for the existence of additional write-ahead logs. WALRecoveryDirs
1015 : // is expected to be used when starting Pebble with a new WALDir or a new
1016 : // WALFailover configuration. The directories associated with the previous
1017 : // configuration may still contain WALs that are required for recovery of
1018 : // the current database state.
1019 : //
1020 : // If a previous WAL configuration may have stored WALs elsewhere but there
1021 : // is not a corresponding entry in WALRecoveryDirs, Open will error.
1022 : WALRecoveryDirs []wal.Dir
1023 :
1024 : // WALMinSyncInterval is the minimum duration between syncs of the WAL. If
1025 : // WAL syncs are requested faster than this interval, they will be
1026 : // artificially delayed. Introducing a small artificial delay (500us) between
1027 : // WAL syncs can allow more operations to arrive and reduce IO operations
1028 : // while having a minimal impact on throughput. This option is supplied as a
1029 : // closure in order to allow the value to be changed dynamically. The default
1030 : // value is 0.
1031 : //
1032 : // TODO(peter): rather than a closure, should there be another mechanism for
1033 : // changing options dynamically?
1034 : WALMinSyncInterval func() time.Duration
1035 :
1036 : // TargetByteDeletionRate is the rate (in bytes per second) to which
1037 : // sstable file deletions are limited (under normal circumstances).
1038 : //
1039 : // Deletion pacing is used to slow down deletions when compactions finish up
1040 : // or readers close and newly-obsolete files need cleaning up. Deleting lots
1041 : // of files at once can cause disk latency to go up on some SSDs, which this
1042 : // functionality guards against.
1043 : //
1044 : // This value is only a best-effort target; the effective rate can be
1045 : // higher if deletions are falling behind or disk space is running low.
1046 : //
1047 : // Setting this to 0 disables deletion pacing, which is also the default.
1048 : TargetByteDeletionRate int
1049 :
1050 : // private options are only used by internal tests or are used internally
1051 : // for facilitating upgrade paths of unconfigurable functionality.
1052 : private struct {
1053 : // disableDeleteOnlyCompactions prevents the scheduling of delete-only
1054 : // compactions that drop sstables wholly covered by range tombstones or
1055 : // range key tombstones.
1056 : disableDeleteOnlyCompactions bool
1057 :
1058 : // disableElisionOnlyCompactions prevents the scheduling of elision-only
1059 : // compactions that rewrite sstables in place in order to elide obsolete
1060 : // keys.
1061 : disableElisionOnlyCompactions bool
1062 :
1063 : // disableLazyCombinedIteration is a private option used by the
1064 : // metamorphic tests to test equivalence between lazy-combined iteration
1065 : // and constructing the range-key iterator upfront. It's a private
1066 : // option to avoid littering the public interface with options that we
1067 : // do not want to allow users to actually configure.
1068 : disableLazyCombinedIteration bool
1069 :
1070 : // testingAlwaysWaitForCleanup is set by some tests to force waiting for
1071 : // obsolete file deletion (to make events deterministic).
1072 : testingAlwaysWaitForCleanup bool
1073 :
1074 : // fsCloser holds a closer that should be invoked after a DB using these
1075 : // Options is closed. This is used to automatically stop the
1076 : // long-running goroutine associated with the disk-health-checking FS.
1077 : // See the initialization of FS in EnsureDefaults. Note that care has
1078 : // been taken to ensure that it is still safe to continue using the FS
1079 : // after this closer has been invoked. However, if write operations
1080 : // against the FS are made after the DB is closed, the FS may leak a
1081 : // goroutine indefinitely.
1082 : fsCloser io.Closer
1083 : }
1084 : }
1085 :
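// A minimal sketch of a typical Options construction; the sizes and the
// directory name are illustrative only, and Open fills in defaults for
// everything left unset:
//
//	cache := pebble.NewCache(512 << 20) // 512 MB block cache
//	defer cache.Unref()
//	db, err := pebble.Open("demo-db", &pebble.Options{
//		Cache:                    cache,
//		FormatMajorVersion:       pebble.FormatNewest,
//		MemTableSize:             64 << 20,
//		MaxConcurrentCompactions: func() int { return 4 },
//	})
//	if err != nil {
//		return err
//	}
//	defer db.Close()
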
1086 : // WALFailoverOptions configures the WAL failover mechanics to use during
1087 : // transient write unavailability on the primary WAL volume.
1088 : type WALFailoverOptions struct {
1089 : // Secondary indicates the secondary directory and VFS to use in the event a
1090 : // write to the primary WAL stalls.
1091 : Secondary wal.Dir
1092 : // FailoverOptions provides configuration of the thresholds and intervals
1093 : // involved in WAL failover. If any of its fields are left unspecified,
1094 : // reasonable defaults will be used.
1095 : wal.FailoverOptions
1096 : }
1097 :
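// A minimal sketch of configuring failover to a secondary WAL volume; the
// directory path is hypothetical:
//
//	opts.WALFailover = &pebble.WALFailoverOptions{
//		Secondary: wal.Dir{FS: vfs.Default, Dirname: "/mnt/backup-disk/wals"},
//		// Unset FailoverOptions fields pick up reasonable defaults.
//	}
//
// After retiring such a configuration, list the old secondary directory in
// WALRecoveryDirs so that Open can still find and replay logs written there.
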
1098 : // DebugCheckLevels calls CheckLevels on the provided database.
1099 : // It may be set in the DebugCheck field of Options to check
1100 : // level invariants whenever a new version is installed.
1101 1 : func DebugCheckLevels(db *DB) error {
1102 1 : return db.CheckLevels(nil)
1103 1 : }
1104 :
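// For example, tests can enable the check with:
//
//	opts := &pebble.Options{DebugCheck: pebble.DebugCheckLevels}
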
1105 : // EnsureDefaults ensures that the default values for all options are set if a
1106 : // valid value was not already specified. Returns the new options.
1107 1 : func (o *Options) EnsureDefaults() *Options {
1108 1 : if o == nil {
1109 1 : o = &Options{}
1110 1 : }
1111 1 : o.Comparer = o.Comparer.EnsureDefaults()
1112 1 :
1113 1 : if o.BytesPerSync <= 0 {
1114 1 : o.BytesPerSync = 512 << 10 // 512 KB
1115 1 : }
1116 1 : if o.Cleaner == nil {
1117 1 : o.Cleaner = DeleteCleaner{}
1118 1 : }
1119 :
1120 1 : if o.Experimental.DisableIngestAsFlushable == nil {
1121 1 : o.Experimental.DisableIngestAsFlushable = func() bool { return false }
1122 : }
1123 1 : if o.Experimental.L0CompactionConcurrency <= 0 {
1124 1 : o.Experimental.L0CompactionConcurrency = 10
1125 1 : }
1126 1 : if o.Experimental.CompactionDebtConcurrency <= 0 {
1127 1 : o.Experimental.CompactionDebtConcurrency = 1 << 30 // 1 GB
1128 1 : }
1129 1 : if o.Experimental.KeyValidationFunc == nil {
1130 1 : o.Experimental.KeyValidationFunc = func([]byte) error { return nil }
1131 : }
1132 1 : if o.L0CompactionThreshold <= 0 {
1133 1 : o.L0CompactionThreshold = 4
1134 1 : }
1135 1 : if o.L0CompactionFileThreshold <= 0 {
1136 1 : // Some justification for the default of 500:
1137 1 : // Why not smaller?:
1138 1 : // - The default target file size for L0 is 2MB, so 500 files is <= 1GB
1139 1 : // of data. At observed compaction speeds of > 20MB/s, L0 can be
1140 1 : // cleared of all files in < 1min, so this backlog is not huge.
1141 1 : // - 500 files is low overhead for instantiating L0 sublevels from
1142 1 : // scratch.
1143 1 : // - Lower values were observed to cause excessive and inefficient
1144 1 : // compactions out of L0 in a TPCC import benchmark.
1145 1 : // Why not larger?:
1146 1 : // - More than 1min to compact everything out of L0.
1147 1 : // - CockroachDB's admission control system uses a threshold of 1000
1148 1 : // files to start throttling writes to Pebble. Using 500 here gives
1149 1 : // us headroom between when Pebble should start compacting L0 and
1150 1 : // when the admission control threshold is reached.
1151 1 : //
1152 1 : // We can revisit this default in the future based on better
1153 1 : // experimental understanding.
1154 1 : //
1155 1 : // TODO(jackson): Experiment with slightly lower thresholds [or higher
1156 1 : // admission control thresholds] to see whether a higher L0 score at the
1157 1 : // threshold (currently 2.0) is necessary for some workloads to avoid
1158 1 : // starving L0 in favor of lower-level compactions.
1159 1 : o.L0CompactionFileThreshold = 500
1160 1 : }
1161 1 : if o.L0StopWritesThreshold <= 0 {
1162 1 : o.L0StopWritesThreshold = 12
1163 1 : }
1164 1 : if o.LBaseMaxBytes <= 0 {
1165 1 : o.LBaseMaxBytes = 64 << 20 // 64 MB
1166 1 : }
1167 1 : if o.Levels == nil {
1168 1 : o.Levels = make([]LevelOptions, 1)
1169 1 : for i := range o.Levels {
1170 1 : if i > 0 {
1171 0 : l := &o.Levels[i]
1172 0 : if l.TargetFileSize <= 0 {
1173 0 : l.TargetFileSize = o.Levels[i-1].TargetFileSize * 2
1174 0 : }
1175 : }
1176 1 : o.Levels[i].EnsureDefaults()
1177 : }
1178 1 : } else {
1179 1 : for i := range o.Levels {
1180 1 : o.Levels[i].EnsureDefaults()
1181 1 : }
1182 : }
1183 1 : if o.Logger == nil {
1184 1 : o.Logger = DefaultLogger
1185 1 : }
1186 1 : if o.EventListener == nil {
1187 1 : o.EventListener = &EventListener{}
1188 1 : }
1189 1 : o.EventListener.EnsureDefaults(o.Logger)
1190 1 : if o.MaxManifestFileSize == 0 {
1191 1 : o.MaxManifestFileSize = 128 << 20 // 128 MB
1192 1 : }
1193 1 : if o.MaxOpenFiles == 0 {
1194 1 : o.MaxOpenFiles = 1000
1195 1 : }
1196 1 : if o.MemTableSize <= 0 {
1197 1 : o.MemTableSize = 4 << 20 // 4 MB
1198 1 : }
1199 1 : if o.MemTableStopWritesThreshold <= 0 {
1200 1 : o.MemTableStopWritesThreshold = 2
1201 1 : }
1202 1 : if o.Merger == nil {
1203 1 : o.Merger = DefaultMerger
1204 1 : }
1205 1 : if o.MaxConcurrentCompactions == nil {
1206 1 : o.MaxConcurrentCompactions = func() int { return 1 }
1207 : }
1208 1 : if o.MaxConcurrentDownloads == nil {
1209 1 : o.MaxConcurrentDownloads = func() int { return 1 }
1210 : }
1211 1 : if o.NumPrevManifest <= 0 {
1212 1 : o.NumPrevManifest = 1
1213 1 : }
1214 :
1215 1 : if o.FormatMajorVersion == FormatDefault {
1216 1 : o.FormatMajorVersion = FormatMinSupported
1217 1 : if o.Experimental.CreateOnShared != remote.CreateOnSharedNone {
1218 1 : o.FormatMajorVersion = FormatMinForSharedObjects
1219 1 : }
1220 : }
1221 :
1222 1 : if o.FS == nil {
1223 1 : o.WithFSDefaults()
1224 1 : }
1225 1 : if o.FlushSplitBytes <= 0 {
1226 1 : o.FlushSplitBytes = 2 * o.Levels[0].TargetFileSize
1227 1 : }
1228 1 : if o.WALFailover != nil {
1229 1 : o.WALFailover.FailoverOptions.EnsureDefaults()
1230 1 : }
1231 1 : if o.Experimental.LevelMultiplier <= 0 {
1232 1 : o.Experimental.LevelMultiplier = defaultLevelMultiplier
1233 1 : }
1234 1 : if o.Experimental.ReadCompactionRate == 0 {
1235 1 : o.Experimental.ReadCompactionRate = 16000
1236 1 : }
1237 1 : if o.Experimental.ReadSamplingMultiplier == 0 {
1238 1 : o.Experimental.ReadSamplingMultiplier = 1 << 4
1239 1 : }
1240 1 : if o.Experimental.TableCacheShards <= 0 {
1241 1 : o.Experimental.TableCacheShards = runtime.GOMAXPROCS(0)
1242 1 : }
1243 1 : if o.Experimental.CPUWorkPermissionGranter == nil {
1244 1 : o.Experimental.CPUWorkPermissionGranter = defaultCPUWorkGranter{}
1245 1 : }
1246 1 : if o.Experimental.MultiLevelCompactionHeuristic == nil {
1247 1 : o.Experimental.MultiLevelCompactionHeuristic = WriteAmpHeuristic{}
1248 1 : }
1249 :
1250 1 : o.initMaps()
1251 1 : return o
1252 : }
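// Example (illustrative sketch, not part of the package): EnsureDefaults
// tolerates a nil receiver, so a caller can derive a fully defaulted
// configuration from nothing. vfs.NewMem is used here only to keep the
// snippet self-contained:
//
//	opts := (&Options{FS: vfs.NewMem()}).EnsureDefaults()
//	// opts.MaxOpenFiles == 1000, opts.MemTableSize == 4<<20, etc.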
1253 :
1254 : // WithFSDefaults configures the Options to wrap the configured filesystem with
1255 : // the default virtual file system middleware, like disk-health checking.
1256 1 : func (o *Options) WithFSDefaults() *Options {
1257 1 : if o.FS == nil {
1258 1 : o.FS = vfs.Default
1259 1 : }
1260 1 : o.FS, o.private.fsCloser = vfs.WithDiskHealthChecks(o.FS, 5*time.Second, nil,
1261 1 : func(info vfs.DiskSlowInfo) {
1262 0 : o.EventListener.DiskSlow(info)
1263 0 : })
1264 1 : return o
1265 : }
1266 :
1267 : // AddEventListener adds the provided event listener to the Options, in addition
1268 : // to any existing event listener.
1269 1 : func (o *Options) AddEventListener(l EventListener) {
1270 1 : if o.EventListener != nil {
1271 1 : l = TeeEventListener(l, *o.EventListener)
1272 1 : }
1273 1 : o.EventListener = &l
1274 : }
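// Example (hedged sketch): AddEventListener tees rather than replaces, so
// both the existing and the new listener observe every event. FlushEnd and
// FlushInfo are the package's own types; the logging body is illustrative:
//
//	opts := (&Options{}).EnsureDefaults()
//	opts.AddEventListener(EventListener{
//		FlushEnd: func(info FlushInfo) { opts.Logger.Infof("flush: %s", info) },
//	})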
1275 :
1276 : // initMaps initializes the Filters map from each level's FilterPolicy.
1277 1 : func (o *Options) initMaps() {
1278 1 : for i := range o.Levels {
1279 1 : l := &o.Levels[i]
1280 1 : if l.FilterPolicy != nil {
1281 1 : if o.Filters == nil {
1282 1 : o.Filters = make(map[string]FilterPolicy)
1283 1 : }
1284 1 : name := l.FilterPolicy.Name()
1285 1 : if _, ok := o.Filters[name]; !ok {
1286 1 : o.Filters[name] = l.FilterPolicy
1287 1 : }
1288 : }
1289 : }
1290 : }
1291 :
1292 : // Level returns the LevelOptions for the specified level.
1293 1 : func (o *Options) Level(level int) LevelOptions {
1294 1 : if level < len(o.Levels) {
1295 1 : return o.Levels[level]
1296 1 : }
1297 1 : n := len(o.Levels) - 1
1298 1 : l := o.Levels[n]
1299 1 : for i := n; i < level; i++ {
1300 1 : l.TargetFileSize *= 2
1301 1 : }
1302 1 : return l
1303 : }
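// Example (illustrative sketch): for levels beyond the last configured
// entry, Level extrapolates by doubling TargetFileSize once per level. With
// the single default level (2MB target), level 6 resolves to 2<<20 * 2^6:
//
//	opts := (&Options{}).EnsureDefaults()
//	_ = opts.Level(6).TargetFileSize // 128 << 20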
1304 :
1305 : // Clone creates a shallow copy of the supplied options.
1306 1 : func (o *Options) Clone() *Options {
1307 1 : n := &Options{}
1308 1 : if o != nil {
1309 1 : *n = *o
1310 1 : }
1311 1 : return n
1312 : }
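// Example (hedged sketch): the copy is shallow, so reference-typed fields
// such as the Levels slice and the Cache remain shared with the original:
//
//	a := (&Options{}).EnsureDefaults()
//	b := a.Clone()
//	b.Levels[0].BlockSize = 8 << 10 // also visible via a.Levels[0].BlockSize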
1313 :
1314 1 : func filterPolicyName(p FilterPolicy) string {
1315 1 : if p == nil {
1316 1 : return "none"
1317 1 : }
1318 1 : return p.Name()
1319 : }
1320 :
1321 1 : func (o *Options) String() string {
1322 1 : var buf bytes.Buffer
1323 1 :
1324 1 : cacheSize := int64(cacheDefaultSize)
1325 1 : if o.Cache != nil {
1326 1 : cacheSize = o.Cache.MaxSize()
1327 1 : }
1328 :
1329 1 : fmt.Fprintf(&buf, "[Version]\n")
1330 1 : fmt.Fprintf(&buf, " pebble_version=0.1\n")
1331 1 : fmt.Fprintf(&buf, "\n")
1332 1 : fmt.Fprintf(&buf, "[Options]\n")
1333 1 : fmt.Fprintf(&buf, " bytes_per_sync=%d\n", o.BytesPerSync)
1334 1 : fmt.Fprintf(&buf, " cache_size=%d\n", cacheSize)
1335 1 : fmt.Fprintf(&buf, " cleaner=%s\n", o.Cleaner)
1336 1 : fmt.Fprintf(&buf, " compaction_debt_concurrency=%d\n", o.Experimental.CompactionDebtConcurrency)
1337 1 : fmt.Fprintf(&buf, " comparer=%s\n", o.Comparer.Name)
1338 1 : fmt.Fprintf(&buf, " disable_wal=%t\n", o.DisableWAL)
1339 1 : if o.Experimental.DisableIngestAsFlushable != nil && o.Experimental.DisableIngestAsFlushable() {
1340 1 : fmt.Fprintf(&buf, " disable_ingest_as_flushable=%t\n", true)
1341 1 : }
1342 1 : fmt.Fprintf(&buf, " flush_delay_delete_range=%s\n", o.FlushDelayDeleteRange)
1343 1 : fmt.Fprintf(&buf, " flush_delay_range_key=%s\n", o.FlushDelayRangeKey)
1344 1 : fmt.Fprintf(&buf, " flush_split_bytes=%d\n", o.FlushSplitBytes)
1345 1 : fmt.Fprintf(&buf, " format_major_version=%d\n", o.FormatMajorVersion)
1346 1 : fmt.Fprintf(&buf, " l0_compaction_concurrency=%d\n", o.Experimental.L0CompactionConcurrency)
1347 1 : fmt.Fprintf(&buf, " l0_compaction_file_threshold=%d\n", o.L0CompactionFileThreshold)
1348 1 : fmt.Fprintf(&buf, " l0_compaction_threshold=%d\n", o.L0CompactionThreshold)
1349 1 : fmt.Fprintf(&buf, " l0_stop_writes_threshold=%d\n", o.L0StopWritesThreshold)
1350 1 : fmt.Fprintf(&buf, " lbase_max_bytes=%d\n", o.LBaseMaxBytes)
1351 1 : if o.Experimental.LevelMultiplier != defaultLevelMultiplier {
1352 1 : fmt.Fprintf(&buf, " level_multiplier=%d\n", o.Experimental.LevelMultiplier)
1353 1 : }
1354 1 : fmt.Fprintf(&buf, " max_concurrent_compactions=%d\n", o.MaxConcurrentCompactions())
1355 1 : fmt.Fprintf(&buf, " max_concurrent_downloads=%d\n", o.MaxConcurrentDownloads())
1356 1 : fmt.Fprintf(&buf, " max_manifest_file_size=%d\n", o.MaxManifestFileSize)
1357 1 : fmt.Fprintf(&buf, " max_open_files=%d\n", o.MaxOpenFiles)
1358 1 : fmt.Fprintf(&buf, " mem_table_size=%d\n", o.MemTableSize)
1359 1 : fmt.Fprintf(&buf, " mem_table_stop_writes_threshold=%d\n", o.MemTableStopWritesThreshold)
1360 1 : fmt.Fprintf(&buf, " min_deletion_rate=%d\n", o.TargetByteDeletionRate)
1361 1 : fmt.Fprintf(&buf, " merger=%s\n", o.Merger.Name)
1362 1 : if o.Experimental.MultiLevelCompactionHeuristic != nil {
1363 1 : fmt.Fprintf(&buf, " multilevel_compaction_heuristic=%s\n", o.Experimental.MultiLevelCompactionHeuristic.String())
1364 1 : }
1365 1 : fmt.Fprintf(&buf, " read_compaction_rate=%d\n", o.Experimental.ReadCompactionRate)
1366 1 : fmt.Fprintf(&buf, " read_sampling_multiplier=%d\n", o.Experimental.ReadSamplingMultiplier)
1367 1 : // We no longer care about strict_wal_tail, but set it to true in case an
1368 1 : // older version reads the options.
1369 1 : fmt.Fprintf(&buf, " strict_wal_tail=%t\n", true)
1370 1 : fmt.Fprintf(&buf, " table_cache_shards=%d\n", o.Experimental.TableCacheShards)
1371 1 : fmt.Fprintf(&buf, " validate_on_ingest=%t\n", o.Experimental.ValidateOnIngest)
1372 1 : fmt.Fprintf(&buf, " wal_dir=%s\n", o.WALDir)
1373 1 : fmt.Fprintf(&buf, " wal_bytes_per_sync=%d\n", o.WALBytesPerSync)
1374 1 : fmt.Fprintf(&buf, " max_writer_concurrency=%d\n", o.Experimental.MaxWriterConcurrency)
1375 1 : fmt.Fprintf(&buf, " force_writer_parallelism=%t\n", o.Experimental.ForceWriterParallelism)
1376 1 : fmt.Fprintf(&buf, " secondary_cache_size_bytes=%d\n", o.Experimental.SecondaryCacheSizeBytes)
1377 1 : fmt.Fprintf(&buf, " create_on_shared=%d\n", o.Experimental.CreateOnShared)
1378 1 :
1379 1 : // Private options.
1380 1 : //
1381 1 : // These testing-only options are serialized only when true, so that they
1382 1 : // never appear in production OPTIONS files while the metamorphic tests can
1383 1 : // still propagate them to subprocesses.
1385 1 : if o.private.disableDeleteOnlyCompactions {
1386 1 : fmt.Fprintln(&buf, " disable_delete_only_compactions=true")
1387 1 : }
1388 1 : if o.private.disableElisionOnlyCompactions {
1389 1 : fmt.Fprintln(&buf, " disable_elision_only_compactions=true")
1390 1 : }
1391 1 : if o.private.disableLazyCombinedIteration {
1392 1 : fmt.Fprintln(&buf, " disable_lazy_combined_iteration=true")
1393 1 : }
1394 :
1395 1 : if o.WALFailover != nil {
1396 1 : unhealthyThreshold, _ := o.WALFailover.FailoverOptions.UnhealthyOperationLatencyThreshold()
1397 1 : fmt.Fprintf(&buf, "\n")
1398 1 : fmt.Fprintf(&buf, "[WAL Failover]\n")
1399 1 : fmt.Fprintf(&buf, " secondary_dir=%s\n", o.WALFailover.Secondary.Dirname)
1400 1 : fmt.Fprintf(&buf, " primary_dir_probe_interval=%s\n", o.WALFailover.FailoverOptions.PrimaryDirProbeInterval)
1401 1 : fmt.Fprintf(&buf, " healthy_probe_latency_threshold=%s\n", o.WALFailover.FailoverOptions.HealthyProbeLatencyThreshold)
1402 1 : fmt.Fprintf(&buf, " healthy_interval=%s\n", o.WALFailover.FailoverOptions.HealthyInterval)
1403 1 : fmt.Fprintf(&buf, " unhealthy_sampling_interval=%s\n", o.WALFailover.FailoverOptions.UnhealthySamplingInterval)
1404 1 : fmt.Fprintf(&buf, " unhealthy_operation_latency_threshold=%s\n", unhealthyThreshold)
1405 1 : fmt.Fprintf(&buf, " elevated_write_stall_threshold_lag=%s\n", o.WALFailover.FailoverOptions.ElevatedWriteStallThresholdLag)
1406 1 : }
1407 :
1408 1 : for i := range o.Levels {
1409 1 : l := &o.Levels[i]
1410 1 : fmt.Fprintf(&buf, "\n")
1411 1 : fmt.Fprintf(&buf, "[Level \"%d\"]\n", i)
1412 1 : fmt.Fprintf(&buf, " block_restart_interval=%d\n", l.BlockRestartInterval)
1413 1 : fmt.Fprintf(&buf, " block_size=%d\n", l.BlockSize)
1414 1 : fmt.Fprintf(&buf, " block_size_threshold=%d\n", l.BlockSizeThreshold)
1415 1 : fmt.Fprintf(&buf, " compression=%s\n", resolveDefaultCompression(l.Compression()))
1416 1 : fmt.Fprintf(&buf, " filter_policy=%s\n", filterPolicyName(l.FilterPolicy))
1417 1 : fmt.Fprintf(&buf, " filter_type=%s\n", l.FilterType)
1418 1 : fmt.Fprintf(&buf, " index_block_size=%d\n", l.IndexBlockSize)
1419 1 : fmt.Fprintf(&buf, " target_file_size=%d\n", l.TargetFileSize)
1420 1 : }
1421 :
1422 1 : return buf.String()
1423 : }
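// Example (illustrative sketch): String emits the INI-style document that
// Parse below reads back. EnsureDefaults should run first so fields like
// Comparer and Merger are non-nil when formatted:
//
//	opts := (&Options{}).EnsureDefaults()
//	s := opts.String() // "[Version]\n pebble_version=0.1\n..."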
1424 :
1425 : // parseOptions takes options serialized by Options.String() and parses them into
1426 : // keys and values, calling fn for each one.
1427 1 : func parseOptions(s string, fn func(section, key, value string) error) error {
1428 1 : var section string
1429 1 : for _, line := range strings.Split(s, "\n") {
1430 1 : line = strings.TrimSpace(line)
1431 1 : if len(line) == 0 {
1432 1 : // Skip blank lines.
1433 1 : continue
1434 : }
1435 1 : if line[0] == ';' || line[0] == '#' {
1436 0 : // Skip comments.
1437 0 : continue
1438 : }
1439 1 : n := len(line)
1440 1 : if line[0] == '[' && line[n-1] == ']' {
1441 1 : // Parse section.
1442 1 : section = line[1 : n-1]
1443 1 : continue
1444 : }
1445 :
1446 1 : pos := strings.Index(line, "=")
1447 1 : if pos < 0 {
1448 1 : const maxLen = 50
1449 1 : if len(line) > maxLen {
1450 0 : line = line[:maxLen-3] + "..."
1451 0 : }
1452 1 : return base.CorruptionErrorf("invalid key=value syntax: %q", errors.Safe(line))
1453 : }
1454 :
1455 1 : key := strings.TrimSpace(line[:pos])
1456 1 : value := strings.TrimSpace(line[pos+1:])
1457 1 :
1458 1 : // RocksDB uses a similar (INI-style) syntax for the OPTIONS file, but
1459 1 : // different section names and keys. The "CFOptions ..." paths are the
1460 1 : // RocksDB versions which we map to the Pebble paths.
1461 1 : mappedSection := section
1462 1 : if section == `CFOptions "default"` {
1463 1 : mappedSection = "Options"
1464 1 : switch key {
1465 1 : case "comparator":
1466 1 : key = "comparer"
1467 1 : case "merge_operator":
1468 1 : key = "merger"
1469 : }
1470 : }
1471 :
1472 1 : if err := fn(mappedSection, key, value); err != nil {
1473 1 : return err
1474 1 : }
1475 : }
1476 1 : return nil
1477 : }
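// Example (illustrative sketch) of input accepted by parseOptions. For the
// document below, fn is invoked with ("Options", "max_open_files", "1000")
// and then ("Level \"0\"", "block_size", "4096"):
//
//	[Options]
//	  max_open_files=1000
//
//	[Level "0"]
//	  block_size=4096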
1478 :
1479 : // ParseHooks contains callbacks to create options fields which can have
1480 : // user-defined implementations.
1481 : type ParseHooks struct {
1482 : NewCache func(size int64) *Cache
1483 : NewCleaner func(name string) (Cleaner, error)
1484 : NewComparer func(name string) (*Comparer, error)
1485 : NewFilterPolicy func(name string) (FilterPolicy, error)
1486 : NewMerger func(name string) (*Merger, error)
1487 : SkipUnknown func(name, value string) bool
1488 : }
1489 :
1490 : // Parse parses the options from the specified string. Note that certain
1491 : // options, such as the comparer and merger, cannot be parsed into populated
1492 : // fields from their names alone; supply ParseHooks to construct them.
1493 1 : func (o *Options) Parse(s string, hooks *ParseHooks) error {
1494 1 : return parseOptions(s, func(section, key, value string) error {
1495 1 : // WARNING: DO NOT remove entries from the switches below because doing so
1496 1 : // causes a key previously written to the OPTIONS file to be considered unknown,
1497 1 : // a backwards-incompatible change. Instead, leave in support for parsing the
1498 1 : // key but simply don't parse the value.
1499 1 :
1500 1 : switch {
1501 1 : case section == "Version":
1502 1 : switch key {
1503 1 : case "pebble_version":
1504 0 : default:
1505 0 : if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section+"."+key, value) {
1506 0 : return nil
1507 0 : }
1508 0 : return errors.Errorf("pebble: unknown option: %s.%s",
1509 0 : errors.Safe(section), errors.Safe(key))
1510 : }
1511 1 : return nil
1512 :
1513 1 : case section == "Options":
1514 1 : var err error
1515 1 : switch key {
1516 1 : case "bytes_per_sync":
1517 1 : o.BytesPerSync, err = strconv.Atoi(value)
1518 1 : case "cache_size":
1519 1 : var n int64
1520 1 : n, err = strconv.ParseInt(value, 10, 64)
1521 1 : if err == nil && hooks != nil && hooks.NewCache != nil {
1522 1 : if o.Cache != nil {
1523 0 : o.Cache.Unref()
1524 0 : }
1525 1 : o.Cache = hooks.NewCache(n)
1526 : }
1527 : // We avoid calling cache.New in parsing because it makes it
1528 : // too easy to leak a cache.
1529 1 : case "cleaner":
1530 1 : switch value {
1531 1 : case "archive":
1532 1 : o.Cleaner = ArchiveCleaner{}
1533 1 : case "delete":
1534 1 : o.Cleaner = DeleteCleaner{}
1535 0 : default:
1536 0 : if hooks != nil && hooks.NewCleaner != nil {
1537 0 : o.Cleaner, err = hooks.NewCleaner(value)
1538 0 : }
1539 : }
1540 1 : case "comparer":
1541 1 : switch value {
1542 1 : case "leveldb.BytewiseComparator":
1543 1 : o.Comparer = DefaultComparer
1544 1 : default:
1545 1 : if hooks != nil && hooks.NewComparer != nil {
1546 1 : o.Comparer, err = hooks.NewComparer(value)
1547 1 : }
1548 : }
1549 1 : case "compaction_debt_concurrency":
1550 1 : o.Experimental.CompactionDebtConcurrency, err = strconv.ParseUint(value, 10, 64)
1551 0 : case "delete_range_flush_delay":
1552 0 : // NB: This is a deprecated serialization of the
1553 0 : // `flush_delay_delete_range` option.
1554 0 : o.FlushDelayDeleteRange, err = time.ParseDuration(value)
1555 1 : case "disable_delete_only_compactions":
1556 1 : o.private.disableDeleteOnlyCompactions, err = strconv.ParseBool(value)
1557 1 : case "disable_elision_only_compactions":
1558 1 : o.private.disableElisionOnlyCompactions, err = strconv.ParseBool(value)
1559 1 : case "disable_ingest_as_flushable":
1560 1 : var v bool
1561 1 : v, err = strconv.ParseBool(value)
1562 1 : if err == nil {
1563 1 : o.Experimental.DisableIngestAsFlushable = func() bool { return v }
1564 : }
1565 1 : case "disable_lazy_combined_iteration":
1566 1 : o.private.disableLazyCombinedIteration, err = strconv.ParseBool(value)
1567 1 : case "disable_wal":
1568 1 : o.DisableWAL, err = strconv.ParseBool(value)
1569 1 : case "flush_delay_delete_range":
1570 1 : o.FlushDelayDeleteRange, err = time.ParseDuration(value)
1571 1 : case "flush_delay_range_key":
1572 1 : o.FlushDelayRangeKey, err = time.ParseDuration(value)
1573 1 : case "flush_split_bytes":
1574 1 : o.FlushSplitBytes, err = strconv.ParseInt(value, 10, 64)
1575 1 : case "format_major_version":
1576 1 : // NB: The version written here may be stale. Open does
1577 1 : // not use the format major version encoded in the
1578 1 : // OPTIONS file except to validate, right here, that the
1579 1 : // encoded version is a known, supported one.
1580 1 : var v uint64
1581 1 : v, err = strconv.ParseUint(value, 10, 64)
1582 1 : if vers := FormatMajorVersion(v); vers > internalFormatNewest || vers == FormatDefault {
1583 0 : err = errors.Newf("unsupported format major version %d", o.FormatMajorVersion)
1584 0 : }
1585 1 : if err == nil {
1586 1 : o.FormatMajorVersion = FormatMajorVersion(v)
1587 1 : }
1588 1 : case "l0_compaction_concurrency":
1589 1 : o.Experimental.L0CompactionConcurrency, err = strconv.Atoi(value)
1590 1 : case "l0_compaction_file_threshold":
1591 1 : o.L0CompactionFileThreshold, err = strconv.Atoi(value)
1592 1 : case "l0_compaction_threshold":
1593 1 : o.L0CompactionThreshold, err = strconv.Atoi(value)
1594 1 : case "l0_stop_writes_threshold":
1595 1 : o.L0StopWritesThreshold, err = strconv.Atoi(value)
1596 0 : case "l0_sublevel_compactions":
1597 : // Do nothing; option existed in older versions of pebble.
1598 1 : case "lbase_max_bytes":
1599 1 : o.LBaseMaxBytes, err = strconv.ParseInt(value, 10, 64)
1600 1 : case "level_multiplier":
1601 1 : o.Experimental.LevelMultiplier, err = strconv.Atoi(value)
1602 1 : case "max_concurrent_compactions":
1603 1 : var concurrentCompactions int
1604 1 : concurrentCompactions, err = strconv.Atoi(value)
1605 1 : if concurrentCompactions <= 0 {
1606 0 : err = errors.New("max_concurrent_compactions cannot be <= 0")
1607 1 : } else {
1608 1 : o.MaxConcurrentCompactions = func() int { return concurrentCompactions }
1609 : }
1610 1 : case "max_concurrent_downloads":
1611 1 : var concurrentDownloads int
1612 1 : concurrentDownloads, err = strconv.Atoi(value)
1613 1 : if concurrentDownloads <= 0 {
1614 0 : err = errors.New("max_concurrent_compactions cannot be <= 0")
1615 1 : } else {
1616 1 : o.MaxConcurrentDownloads = func() int { return concurrentDownloads }
1617 : }
1618 1 : case "max_manifest_file_size":
1619 1 : o.MaxManifestFileSize, err = strconv.ParseInt(value, 10, 64)
1620 1 : case "max_open_files":
1621 1 : o.MaxOpenFiles, err = strconv.Atoi(value)
1622 1 : case "mem_table_size":
1623 1 : o.MemTableSize, err = strconv.ParseUint(value, 10, 64)
1624 1 : case "mem_table_stop_writes_threshold":
1625 1 : o.MemTableStopWritesThreshold, err = strconv.Atoi(value)
1626 0 : case "min_compaction_rate":
1627 : // Do nothing; option existed in older versions of pebble, and
1628 : // may be meaningful again eventually.
1629 1 : case "min_deletion_rate":
1630 1 : o.TargetByteDeletionRate, err = strconv.Atoi(value)
1631 0 : case "min_flush_rate":
1632 : // Do nothing; option existed in older versions of pebble, and
1633 : // may be meaningful again eventually.
1634 1 : case "multilevel_compaction_heuristic":
1635 1 : switch {
1636 1 : case value == "none":
1637 1 : o.Experimental.MultiLevelCompactionHeuristic = NoMultiLevel{}
1638 1 : case strings.HasPrefix(value, "wamp"):
1639 1 : fields := strings.FieldsFunc(strings.TrimPrefix(value, "wamp"), func(r rune) bool {
1640 1 : return unicode.IsSpace(r) || r == ',' || r == '(' || r == ')'
1641 1 : })
1642 1 : if len(fields) != 2 {
1643 0 : err = errors.Newf("require 2 arguments")
1644 0 : }
1645 1 : var h WriteAmpHeuristic
1646 1 : if err == nil {
1647 1 : h.AddPropensity, err = strconv.ParseFloat(fields[0], 64)
1648 1 : }
1649 1 : if err == nil {
1650 1 : h.AllowL0, err = strconv.ParseBool(fields[1])
1651 1 : }
1652 1 : if err == nil {
1653 1 : o.Experimental.MultiLevelCompactionHeuristic = h
1654 1 : } else {
1655 0 : err = errors.Wrapf(err, "unexpected wamp heuristic arguments: %s", value)
1656 0 : }
1657 0 : default:
1658 0 : err = errors.Newf("unrecognized multilevel compaction heuristic: %s", value)
1659 : }
1660 0 : case "point_tombstone_weight":
1661 : // Do nothing; deprecated.
1662 1 : case "strict_wal_tail":
1663 1 : var strictWALTail bool
1664 1 : strictWALTail, err = strconv.ParseBool(value)
1665 1 : if err == nil && !strictWALTail {
1666 0 : err = errors.Newf("reading from versions with strict_wal_tail=false no longer supported")
1667 0 : }
1668 1 : case "merger":
1669 1 : switch value {
1670 0 : case "nullptr":
1671 0 : o.Merger = nil
1672 1 : case "pebble.concatenate":
1673 1 : o.Merger = DefaultMerger
1674 1 : default:
1675 1 : if hooks != nil && hooks.NewMerger != nil {
1676 1 : o.Merger, err = hooks.NewMerger(value)
1677 1 : }
1678 : }
1679 1 : case "read_compaction_rate":
1680 1 : o.Experimental.ReadCompactionRate, err = strconv.ParseInt(value, 10, 64)
1681 1 : case "read_sampling_multiplier":
1682 1 : o.Experimental.ReadSamplingMultiplier, err = strconv.ParseInt(value, 10, 64)
1683 1 : case "table_cache_shards":
1684 1 : o.Experimental.TableCacheShards, err = strconv.Atoi(value)
1685 0 : case "table_format":
1686 0 : switch value {
1687 0 : case "leveldb":
1688 0 : case "rocksdbv2":
1689 0 : default:
1690 0 : return errors.Errorf("pebble: unknown table format: %q", errors.Safe(value))
1691 : }
1692 1 : case "table_property_collectors":
1693 : // No longer implemented; ignore.
1694 1 : case "validate_on_ingest":
1695 1 : o.Experimental.ValidateOnIngest, err = strconv.ParseBool(value)
1696 1 : case "wal_dir":
1697 1 : o.WALDir = value
1698 1 : case "wal_bytes_per_sync":
1699 1 : o.WALBytesPerSync, err = strconv.Atoi(value)
1700 1 : case "max_writer_concurrency":
1701 1 : o.Experimental.MaxWriterConcurrency, err = strconv.Atoi(value)
1702 1 : case "force_writer_parallelism":
1703 1 : o.Experimental.ForceWriterParallelism, err = strconv.ParseBool(value)
1704 1 : case "secondary_cache_size_bytes":
1705 1 : o.Experimental.SecondaryCacheSizeBytes, err = strconv.ParseInt(value, 10, 64)
1706 1 : case "create_on_shared":
1707 1 : var createOnSharedInt int64
1708 1 : createOnSharedInt, err = strconv.ParseInt(value, 10, 64)
1709 1 : o.Experimental.CreateOnShared = remote.CreateOnSharedStrategy(createOnSharedInt)
1710 0 : default:
1711 0 : if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section+"."+key, value) {
1712 0 : return nil
1713 0 : }
1714 0 : return errors.Errorf("pebble: unknown option: %s.%s",
1715 0 : errors.Safe(section), errors.Safe(key))
1716 : }
1717 1 : return err
1718 :
1719 1 : case section == "WAL Failover":
1720 1 : if o.WALFailover == nil {
1721 1 : o.WALFailover = new(WALFailoverOptions)
1722 1 : }
1723 1 : var err error
1724 1 : switch key {
1725 1 : case "secondary_dir":
1726 1 : o.WALFailover.Secondary = wal.Dir{Dirname: value, FS: vfs.Default}
1727 1 : case "primary_dir_probe_interval":
1728 1 : o.WALFailover.PrimaryDirProbeInterval, err = time.ParseDuration(value)
1729 1 : case "healthy_probe_latency_threshold":
1730 1 : o.WALFailover.HealthyProbeLatencyThreshold, err = time.ParseDuration(value)
1731 1 : case "healthy_interval":
1732 1 : o.WALFailover.HealthyInterval, err = time.ParseDuration(value)
1733 1 : case "unhealthy_sampling_interval":
1734 1 : o.WALFailover.UnhealthySamplingInterval, err = time.ParseDuration(value)
1735 1 : case "unhealthy_operation_latency_threshold":
1736 1 : var threshold time.Duration
1737 1 : threshold, err = time.ParseDuration(value)
1738 1 : o.WALFailover.UnhealthyOperationLatencyThreshold = func() (time.Duration, bool) {
1739 1 : return threshold, true
1740 1 : }
1741 1 : case "elevated_write_stall_threshold_lag":
1742 1 : o.WALFailover.ElevatedWriteStallThresholdLag, err = time.ParseDuration(value)
1743 0 : default:
1744 0 : if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section+"."+key, value) {
1745 0 : return nil
1746 0 : }
1747 0 : return errors.Errorf("pebble: unknown option: %s.%s",
1748 0 : errors.Safe(section), errors.Safe(key))
1749 : }
1750 1 : return err
1751 :
1752 1 : case strings.HasPrefix(section, "Level "):
1753 1 : var index int
1754 1 : if n, err := fmt.Sscanf(section, `Level "%d"`, &index); err != nil {
1755 0 : return err
1756 1 : } else if n != 1 {
1757 0 : if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section, value) {
1758 0 : return nil
1759 0 : }
1760 0 : return errors.Errorf("pebble: unknown section: %q", errors.Safe(section))
1761 : }
1762 :
1763 1 : if len(o.Levels) <= index {
1764 1 : newLevels := make([]LevelOptions, index+1)
1765 1 : copy(newLevels, o.Levels)
1766 1 : o.Levels = newLevels
1767 1 : }
1768 1 : l := &o.Levels[index]
1769 1 :
1770 1 : var err error
1771 1 : switch key {
1772 1 : case "block_restart_interval":
1773 1 : l.BlockRestartInterval, err = strconv.Atoi(value)
1774 1 : case "block_size":
1775 1 : l.BlockSize, err = strconv.Atoi(value)
1776 1 : case "block_size_threshold":
1777 1 : l.BlockSizeThreshold, err = strconv.Atoi(value)
1778 1 : case "compression":
1779 1 : switch value {
1780 0 : case "Default":
1781 0 : l.Compression = func() sstable.Compression { return DefaultCompression }
1782 1 : case "NoCompression":
1783 1 : l.Compression = func() sstable.Compression { return NoCompression }
1784 1 : case "Snappy":
1785 1 : l.Compression = func() sstable.Compression { return SnappyCompression }
1786 1 : case "ZSTD":
1787 1 : l.Compression = func() sstable.Compression { return ZstdCompression }
1788 0 : default:
1789 0 : return errors.Errorf("pebble: unknown compression: %q", errors.Safe(value))
1790 : }
1791 1 : case "filter_policy":
1792 1 : if hooks != nil && hooks.NewFilterPolicy != nil {
1793 1 : l.FilterPolicy, err = hooks.NewFilterPolicy(value)
1794 1 : }
1795 1 : case "filter_type":
1796 1 : switch value {
1797 1 : case "table":
1798 1 : l.FilterType = TableFilter
1799 0 : default:
1800 0 : return errors.Errorf("pebble: unknown filter type: %q", errors.Safe(value))
1801 : }
1802 1 : case "index_block_size":
1803 1 : l.IndexBlockSize, err = strconv.Atoi(value)
1804 1 : case "target_file_size":
1805 1 : l.TargetFileSize, err = strconv.ParseInt(value, 10, 64)
1806 0 : default:
1807 0 : if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section+"."+key, value) {
1808 0 : return nil
1809 0 : }
1810 0 : return errors.Errorf("pebble: unknown option: %s.%s", errors.Safe(section), errors.Safe(key))
1811 : }
1812 1 : return err
1813 : }
1814 1 : if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section+"."+key, value) {
1815 1 : return nil
1816 1 : }
1817 0 : return errors.Errorf("pebble: unknown section: %q", errors.Safe(section))
1818 : })
1819 : }
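// Example (hedged sketch): round-tripping options through the textual form.
// A nil *ParseHooks suffices when no user-defined comparers, mergers,
// filters, cleaners, or caches need reconstructing:
//
//	src := (&Options{}).EnsureDefaults()
//	var dst Options
//	if err := dst.Parse(src.String(), nil); err != nil {
//		// malformed or unknown option
//	}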
1820 :
1821 : // ErrMissingWALRecoveryDir is returned when an attempt is made to open a
1822 : // database without supplying an Options.WALRecoveryDirs entry for a directory
1823 : // that may contain WALs required to recover a consistent database state.
1824 : type ErrMissingWALRecoveryDir struct {
1825 : Dir string
1826 : }
1827 :
1828 : // Error implements error.
1829 1 : func (e ErrMissingWALRecoveryDir) Error() string {
1830 1 : return fmt.Sprintf("directory %q may contain relevant WALs", e.Dir)
1831 1 : }
1832 :
1833 : // CheckCompatibility verifies the options are compatible with the previous options
1834 : // serialized by Options.String(). For example, the Comparer and Merger must be
1835 : // the same, or data in the DB cannot be read correctly.
1836 : //
1837 : // This function only looks at specific keys and does not error out if the
1838 : // options are newer and contain unknown keys.
1839 1 : func (o *Options) CheckCompatibility(previousOptions string) error {
1840 1 : return parseOptions(previousOptions, func(section, key, value string) error {
1841 1 : switch section + "." + key {
1842 1 : case "Options.comparer":
1843 1 : if value != o.Comparer.Name {
1844 1 : return errors.Errorf("pebble: comparer name from file %q != comparer name from options %q",
1845 1 : errors.Safe(value), errors.Safe(o.Comparer.Name))
1846 1 : }
1847 1 : case "Options.merger":
1848 1 : // RocksDB allows the merge operator to be unspecified, in which case it
1849 1 : // shows up as "nullptr".
1850 1 : if value != "nullptr" && value != o.Merger.Name {
1851 1 : return errors.Errorf("pebble: merger name from file %q != merger name from options %q",
1852 1 : errors.Safe(value), errors.Safe(o.Merger.Name))
1853 1 : }
1854 1 : case "Options.wal_dir", "WAL Failover.secondary_dir":
1855 1 : switch {
1856 1 : case o.WALDir == value:
1857 1 : return nil
1858 1 : case o.WALFailover != nil && o.WALFailover.Secondary.Dirname == value:
1859 1 : return nil
1860 1 : default:
1861 1 : for _, d := range o.WALRecoveryDirs {
1862 1 : if d.Dirname == value {
1863 1 : return nil
1864 1 : }
1865 : }
1866 1 : return ErrMissingWALRecoveryDir{Dir: value}
1867 : }
1868 : }
1869 1 : return nil
1870 : })
1871 : }
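// Example (illustrative sketch): a mismatched comparer name is the
// canonical incompatibility this check catches:
//
//	cur := (&Options{}).EnsureDefaults()
//	prev := "[Options]\n comparer=my-custom-comparer\n"
//	err := cur.CheckCompatibility(prev) // non-nil: comparer names differ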
1872 :
1873 : // Validate verifies that the options are mutually consistent. For example,
1874 : // L0StopWritesThreshold must be >= L0CompactionThreshold, otherwise a write
1875 : // stall would persist indefinitely.
1876 1 : func (o *Options) Validate() error {
1877 1 : // Note that we can presume Options.EnsureDefaults has been called, so there
1878 1 : // is no need to check for zero values.
1879 1 :
1880 1 : var buf strings.Builder
1881 1 : if o.Experimental.L0CompactionConcurrency < 1 {
1882 1 : fmt.Fprintf(&buf, "L0CompactionConcurrency (%d) must be >= 1\n",
1883 1 : o.Experimental.L0CompactionConcurrency)
1884 1 : }
1885 1 : if o.L0StopWritesThreshold < o.L0CompactionThreshold {
1886 1 : fmt.Fprintf(&buf, "L0StopWritesThreshold (%d) must be >= L0CompactionThreshold (%d)\n",
1887 1 : o.L0StopWritesThreshold, o.L0CompactionThreshold)
1888 1 : }
1889 1 : if uint64(o.MemTableSize) >= maxMemTableSize {
1890 1 : fmt.Fprintf(&buf, "MemTableSize (%s) must be < %s\n",
1891 1 : humanize.Bytes.Uint64(uint64(o.MemTableSize)), humanize.Bytes.Uint64(maxMemTableSize))
1892 1 : }
1893 1 : if o.MemTableStopWritesThreshold < 2 {
1894 1 : fmt.Fprintf(&buf, "MemTableStopWritesThreshold (%d) must be >= 2\n",
1895 1 : o.MemTableStopWritesThreshold)
1896 1 : }
1897 1 : if o.FormatMajorVersion < FormatMinSupported || o.FormatMajorVersion > internalFormatNewest {
1898 0 : fmt.Fprintf(&buf, "FormatMajorVersion (%d) must be between %d and %d\n",
1899 0 : o.FormatMajorVersion, FormatMinSupported, internalFormatNewest)
1900 0 : }
1901 1 : if o.Experimental.CreateOnShared != remote.CreateOnSharedNone && o.FormatMajorVersion < FormatMinForSharedObjects {
1902 0 : fmt.Fprintf(&buf, "FormatMajorVersion (%d) when CreateOnShared is set must be at least %d\n",
1903 0 : o.FormatMajorVersion, FormatMinForSharedObjects)
1905 0 : }
1906 1 : if o.TableCache != nil && o.Cache != o.TableCache.cache {
1907 1 : fmt.Fprintf(&buf, "underlying cache in the TableCache and the Cache dont match\n")
1908 1 : }
1909 1 : if buf.Len() == 0 {
1910 1 : return nil
1911 1 : }
1912 1 : return errors.New(buf.String())
1913 : }
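// Example (hedged sketch): Validate flags mutually inconsistent settings,
// such as a stop-writes threshold below the compaction threshold, which
// would otherwise stall writes indefinitely:
//
//	opts := (&Options{}).EnsureDefaults()
//	opts.L0CompactionThreshold = 10
//	opts.L0StopWritesThreshold = 4
//	err := opts.Validate() // non-nil: threshold ordering violated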
1914 :
1915 : // MakeReaderOptions constructs sstable.ReaderOptions from the corresponding
1916 : // options in the receiver.
1917 1 : func (o *Options) MakeReaderOptions() sstable.ReaderOptions {
1918 1 : var readerOpts sstable.ReaderOptions
1919 1 : if o != nil {
1920 1 : readerOpts.Cache = o.Cache
1921 1 : readerOpts.Comparer = o.Comparer
1922 1 : readerOpts.Filters = o.Filters
1923 1 : if o.Merger != nil {
1924 1 : readerOpts.Merge = o.Merger.Merge
1925 1 : readerOpts.MergerName = o.Merger.Name
1926 1 : }
1927 1 : readerOpts.LoggerAndTracer = o.LoggerAndTracer
1928 : }
1929 1 : return readerOpts
1930 : }
1931 :
1932 : // MakeWriterOptions constructs sstable.WriterOptions for the specified level
1933 : // from the corresponding options in the receiver.
1934 1 : func (o *Options) MakeWriterOptions(level int, format sstable.TableFormat) sstable.WriterOptions {
1935 1 : var writerOpts sstable.WriterOptions
1936 1 : writerOpts.TableFormat = format
1937 1 : if o != nil {
1938 1 : writerOpts.Cache = o.Cache
1939 1 : writerOpts.Comparer = o.Comparer
1940 1 : if o.Merger != nil {
1941 1 : writerOpts.MergerName = o.Merger.Name
1942 1 : }
1943 1 : writerOpts.BlockPropertyCollectors = o.BlockPropertyCollectors
1944 : }
1945 1 : if format >= sstable.TableFormatPebblev3 {
1946 1 : writerOpts.ShortAttributeExtractor = o.Experimental.ShortAttributeExtractor
1947 1 : writerOpts.RequiredInPlaceValueBound = o.Experimental.RequiredInPlaceValueBound
1948 1 : if format >= sstable.TableFormatPebblev4 && level == numLevels-1 {
1949 1 : writerOpts.WritingToLowestLevel = true
1950 1 : }
1951 : }
1952 1 : levelOpts := o.Level(level)
1953 1 : writerOpts.BlockRestartInterval = levelOpts.BlockRestartInterval
1954 1 : writerOpts.BlockSize = levelOpts.BlockSize
1955 1 : writerOpts.BlockSizeThreshold = levelOpts.BlockSizeThreshold
1956 1 : writerOpts.Compression = resolveDefaultCompression(levelOpts.Compression())
1957 1 : writerOpts.FilterPolicy = levelOpts.FilterPolicy
1958 1 : writerOpts.FilterType = levelOpts.FilterType
1959 1 : writerOpts.IndexBlockSize = levelOpts.IndexBlockSize
1960 1 : return writerOpts
1961 : }
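// Example (illustrative sketch): building writer options for the bottommost
// level. With sstable.TableFormatPebblev4 (referenced above), writing to the
// last level sets WritingToLowestLevel:
//
//	opts := (&Options{}).EnsureDefaults()
//	wo := opts.MakeWriterOptions(numLevels-1, sstable.TableFormatPebblev4)
//	_ = wo.WritingToLowestLevel // true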
1962 :
1963 1 : func resolveDefaultCompression(c Compression) Compression {
1964 1 : if c <= DefaultCompression || c >= sstable.NCompression {
1965 1 : c = SnappyCompression
1966 1 : }
1967 1 : return c
1968 : }
|