Line data Source code
1 : // Copyright 2011 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package pebble
6 :
7 : import (
8 : "bytes"
9 : "fmt"
10 : "io"
11 : "runtime"
12 : "sort"
13 : "strconv"
14 : "strings"
15 : "time"
16 : "unicode"
17 :
18 : "github.com/cockroachdb/crlib/fifo"
19 : "github.com/cockroachdb/errors"
20 : "github.com/cockroachdb/pebble/internal/base"
21 : "github.com/cockroachdb/pebble/internal/cache"
22 : "github.com/cockroachdb/pebble/internal/humanize"
23 : "github.com/cockroachdb/pebble/internal/keyspan"
24 : "github.com/cockroachdb/pebble/internal/manifest"
25 : "github.com/cockroachdb/pebble/internal/testkeys"
26 : "github.com/cockroachdb/pebble/objstorage/objstorageprovider"
27 : "github.com/cockroachdb/pebble/objstorage/remote"
28 : "github.com/cockroachdb/pebble/rangekey"
29 : "github.com/cockroachdb/pebble/sstable"
30 : "github.com/cockroachdb/pebble/sstable/block"
31 : "github.com/cockroachdb/pebble/sstable/colblk"
32 : "github.com/cockroachdb/pebble/vfs"
33 : "github.com/cockroachdb/pebble/wal"
34 : )
35 :
36 : const (
37 : cacheDefaultSize = 8 << 20 // 8 MB
38 : defaultLevelMultiplier = 10
39 : )
40 :
41 : // Compression exports the block.Compression type.
42 : type Compression = block.Compression
43 :
44 : // Exported Compression constants.
45 : const (
46 : DefaultCompression = block.DefaultCompression
47 : NoCompression = block.NoCompression
48 : SnappyCompression = block.SnappyCompression
49 : ZstdCompression = block.ZstdCompression
50 : )
51 :
52 : // FilterType exports the base.FilterType type.
53 : type FilterType = base.FilterType
54 :
55 : // Exported TableFilter constants.
56 : const (
57 : TableFilter = base.TableFilter
58 : )
59 :
60 : // FilterWriter exports the base.FilterWriter type.
61 : type FilterWriter = base.FilterWriter
62 :
63 : // FilterPolicy exports the base.FilterPolicy type.
64 : type FilterPolicy = base.FilterPolicy
65 :
66 : // KeySchema exports the colblk.KeySchema type.
67 : type KeySchema = colblk.KeySchema
68 :
69 : // BlockPropertyCollector exports the sstable.BlockPropertyCollector type.
70 : type BlockPropertyCollector = sstable.BlockPropertyCollector
71 :
72 : // BlockPropertyFilter exports the sstable.BlockPropertyFilter type.
73 : type BlockPropertyFilter = base.BlockPropertyFilter
74 :
75 : // ShortAttributeExtractor exports the base.ShortAttributeExtractor type.
76 : type ShortAttributeExtractor = base.ShortAttributeExtractor
77 :
78 : // UserKeyPrefixBound exports the sstable.UserKeyPrefixBound type.
79 : type UserKeyPrefixBound = sstable.UserKeyPrefixBound
80 :
81 : // IterKeyType configures which types of keys an iterator should surface.
82 : type IterKeyType int8
83 :
84 : const (
85 : // IterKeyTypePointsOnly configures an iterator to iterate over point keys
86 : // only.
87 : IterKeyTypePointsOnly IterKeyType = iota
88 : // IterKeyTypeRangesOnly configures an iterator to iterate over range keys
89 : // only.
90 : IterKeyTypeRangesOnly
91 : // IterKeyTypePointsAndRanges configures an iterator to iterate over both point
92 : // keys and range keys simultaneously.
93 : IterKeyTypePointsAndRanges
94 : )
95 :
96 : // String implements fmt.Stringer.
97 1 : func (t IterKeyType) String() string {
98 1 : switch t {
99 1 : case IterKeyTypePointsOnly:
100 1 : return "points-only"
101 1 : case IterKeyTypeRangesOnly:
102 1 : return "ranges-only"
103 1 : case IterKeyTypePointsAndRanges:
104 1 : return "points-and-ranges"
105 0 : default:
106 0 : panic(fmt.Sprintf("unknown key type %d", t))
107 : }
108 : }
109 :
110 : // IterOptions hold the optional per-query parameters for NewIter.
111 : //
112 : // Like Options, a nil *IterOptions is valid and means to use the default
113 : // values.
114 : type IterOptions struct {
115 : // LowerBound specifies the smallest key (inclusive) that the iterator will
116 : // return during iteration. If the iterator is seeked or iterated past this
117 : // boundary the iterator will return Valid()==false. Setting LowerBound
118 : // effectively truncates the key space visible to the iterator.
119 : LowerBound []byte
120 : // UpperBound specifies the largest key (exclusive) that the iterator will
121 : // return during iteration. If the iterator is seeked or iterated past this
122 : // boundary the iterator will return Valid()==false. Setting UpperBound
123 : // effectively truncates the key space visible to the iterator.
124 : UpperBound []byte
125 : // SkipPoint may be used to skip over point keys that don't match an
126 : // arbitrary predicate during iteration. If set, the Iterator invokes
127 : // SkipPoint for keys encountered. If SkipPoint returns true, the iterator
128 : // will skip the key without yielding it to the iterator operation in
129 : // progress.
130 : //
131 : // SkipPoint must be a pure function and always return the same result when
132 : // provided the same arguments. The iterator may call SkipPoint multiple
133 : // times for the same user key.
134 : SkipPoint func(userKey []byte) bool
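// For illustration only (an editorial sketch, not part of the original file):
// skipping point keys under a hypothetical "tmp/" prefix. The predicate is
// pure, as SkipPoint requires.
//
//	opts := &pebble.IterOptions{
//		SkipPoint: func(userKey []byte) bool {
//			return bytes.HasPrefix(userKey, []byte("tmp/"))
//		},
//	}
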
135 : // PointKeyFilters can be used to avoid scanning tables and blocks in tables
136 : // when iterating over point keys. This slice represents an intersection
137 : // across all filters, i.e., all filters must indicate that the block is
138 : // relevant.
139 : //
140 : // Performance note: When len(PointKeyFilters) > 0, the caller should ensure
141 : // that cap(PointKeyFilters) is at least len(PointKeyFilters)+1. This helps
142 : // avoid allocations in Pebble internal code that mutates the slice.
143 : PointKeyFilters []BlockPropertyFilter
144 : // RangeKeyFilters can be used to avoid scanning tables and blocks in tables
145 : // when iterating over range keys. The same requirements that apply to
146 : // PointKeyFilters apply here too.
147 : RangeKeyFilters []BlockPropertyFilter
148 : // KeyTypes configures which types of keys to iterate over: point keys,
149 : // range keys, or both.
150 : KeyTypes IterKeyType
151 : // RangeKeyMasking can be used to enable automatic masking of point keys by
152 : // range keys. Range key masking is only supported during combined range key
153 : // and point key iteration mode (IterKeyTypePointsAndRanges).
154 : RangeKeyMasking RangeKeyMasking
155 :
156 : // OnlyReadGuaranteedDurable is an advanced option that is only supported by
157 : // the Reader implemented by DB. When set to true, only the guaranteed to be
158 : // durable state is visible in the iterator.
159 : // - This definition is made under the assumption that the FS implementation
160 : // is providing a durability guarantee when data is synced.
161 : // - The visible state represents a consistent point in the history of the
162 : // DB.
163 : // - The implementation is free to choose a conservative definition of what
164 : // is guaranteed durable. For simplicity, the current implementation
165 : // ignores memtables. A more sophisticated implementation could track the
166 : // highest seqnum that is synced to the WAL and published and use that as
167 : // the visible seqnum for an iterator. Note that the latter approach is
168 : // not strictly better than the former since we can have DBs that are (a)
169 : // synced more rarely than memtable flushes, (b) have no WAL. (a) is
170 : // likely to be true in a future CockroachDB context where the DB
171 : // containing the state machine may be rarely synced.
172 : // NB: this current implementation relies on the fact that memtables are
173 : // flushed in seqnum order, and any ingested sstables that happen to have a
174 : // lower seqnum than a non-flushed memtable don't have any overlapping keys.
175 : // This is the fundamental level invariant used in other code too, like when
176 : // merging iterators.
177 : //
178 : // Semantically, using this option provides the caller a "snapshot" as of
179 : // the time the most recent memtable was flushed. An alternate interface
180 : // would be to add a NewSnapshot variant. Creating a snapshot is heavier
181 : // weight than creating an iterator, so we have opted to support this
182 : // iterator option.
183 : OnlyReadGuaranteedDurable bool
184 : // UseL6Filters allows the caller to opt into reading filter blocks for L6
185 : // sstables. This is helpful when many SeekPrefixGEs are expected in quick
186 : // succession and are unlikely to yield even a single key. Filter blocks in
187 : // L6 can be relatively large, often larger than data blocks, so the benefit
188 : // of loading them into the cache is minimized if the probability of the key
189 : // existing is not low or if we just expect a one-time Seek (where loading
190 : // the data block directly is better).
191 : UseL6Filters bool
192 : // Category is used for categorized iterator stats. This should not be
193 : // changed by calling SetOptions.
194 : Category block.Category
195 :
196 : DebugRangeKeyStack bool // enables debugging checks on the range-key iterator stack; intended for tests
197 :
198 : // Internal options.
199 :
200 : logger Logger
201 : // Layer corresponding to this file. Only passed in if constructed by a
202 : // levelIter.
203 : layer manifest.Layer
204 : // disableLazyCombinedIteration is an internal testing option.
205 : disableLazyCombinedIteration bool
206 : // snapshotForHideObsoletePoints is specified for/by levelIter when opening
207 : // files and is used to decide whether to hide obsolete points. A value of 0
208 : // implies obsolete points should not be hidden.
209 : snapshotForHideObsoletePoints base.SeqNum
210 :
211 : // NB: If adding new Options, you must account for them in iterator
212 : // construction and Iterator.SetOptions.
213 : }
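
// For illustration only (an editorial sketch, not part of the original file):
// opening a bounded iterator over both key types, assuming a *pebble.DB named
// db. A nil *IterOptions is also valid and uses the defaults.
//
//	iter, err := db.NewIter(&pebble.IterOptions{
//		LowerBound: []byte("a"), // inclusive
//		UpperBound: []byte("m"), // exclusive
//		KeyTypes:   pebble.IterKeyTypePointsAndRanges,
//	})
//	if err != nil {
//		return err
//	}
//	defer iter.Close()
//	for valid := iter.First(); valid; valid = iter.Next() {
//		_ = iter.Key() // consume the key/value here
//	}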
214 :
215 : // GetLowerBound returns the LowerBound or nil if the receiver is nil.
216 1 : func (o *IterOptions) GetLowerBound() []byte {
217 1 : if o == nil {
218 1 : return nil
219 1 : }
220 1 : return o.LowerBound
221 : }
222 :
223 : // GetUpperBound returns the UpperBound or nil if the receiver is nil.
224 1 : func (o *IterOptions) GetUpperBound() []byte {
225 1 : if o == nil {
226 1 : return nil
227 1 : }
228 1 : return o.UpperBound
229 : }
230 :
231 1 : func (o *IterOptions) pointKeys() bool {
232 1 : if o == nil {
233 0 : return true
234 0 : }
235 1 : return o.KeyTypes == IterKeyTypePointsOnly || o.KeyTypes == IterKeyTypePointsAndRanges
236 : }
237 :
238 1 : func (o *IterOptions) rangeKeys() bool {
239 1 : if o == nil {
240 0 : return false
241 0 : }
242 1 : return o.KeyTypes == IterKeyTypeRangesOnly || o.KeyTypes == IterKeyTypePointsAndRanges
243 : }
244 :
245 1 : func (o *IterOptions) getLogger() Logger {
246 1 : if o == nil || o.logger == nil {
247 1 : return DefaultLogger
248 1 : }
249 1 : return o.logger
250 : }
251 :
252 : // SpanIterOptions creates a SpanIterOptions from this IterOptions.
253 1 : func (o *IterOptions) SpanIterOptions() keyspan.SpanIterOptions {
254 1 : if o == nil {
255 1 : return keyspan.SpanIterOptions{}
256 1 : }
257 1 : return keyspan.SpanIterOptions{
258 1 : RangeKeyFilters: o.RangeKeyFilters,
259 1 : }
260 : }
261 :
262 : // scanInternalOptions is similar to IterOptions, meant for use with
263 : // scanInternalIterator.
264 : type scanInternalOptions struct {
265 : IterOptions
266 :
267 : category block.Category
268 :
269 : visitPointKey func(key *InternalKey, value LazyValue, iterInfo IteratorLevel) error
270 : visitRangeDel func(start, end []byte, seqNum SeqNum) error
271 : visitRangeKey func(start, end []byte, keys []rangekey.Key) error
272 : visitSharedFile func(sst *SharedSSTMeta) error
273 : visitExternalFile func(sst *ExternalFile) error
274 :
275 : // includeObsoleteKeys specifies whether keys shadowed by newer internal keys
276 : // are exposed. If false, only one internal key per user key is exposed.
277 : includeObsoleteKeys bool
278 :
279 : // rateLimitFunc is used to limit the amount of bytes read per second.
280 : rateLimitFunc func(key *InternalKey, value LazyValue) error
281 : }
282 :
283 : // RangeKeyMasking configures automatic hiding of point keys by range keys. A
284 : // non-nil Suffix enables range-key masking. When enabled, range keys with
285 : // suffixes ≥ Suffix behave as masks. All point keys that are contained within a
286 : // masking range key's bounds and have suffixes greater than the range key's
287 : // suffix are automatically skipped.
288 : //
289 : // Specifically, when configured with a RangeKeyMasking.Suffix _s_, and there
290 : // exists a range key with suffix _r_ covering a point key with suffix _p_, and
291 : //
292 : // _s_ ≤ _r_ < _p_
293 : //
294 : // then the point key is elided.
295 : //
296 : // Range-key masking may only be used when iterating over both point keys and
297 : // range keys with IterKeyTypePointsAndRanges.
298 : type RangeKeyMasking struct {
299 : // Suffix configures which range keys may mask point keys. Only range keys
300 : // that are defined at suffixes greater than or equal to Suffix will mask
301 : // point keys.
302 : Suffix []byte
303 : // Filter is an optional field that may be used to improve performance of
304 : // range-key masking through a block-property filter defined over key
305 : // suffixes. If non-nil, Filter is called by Pebble to construct a
306 : // block-property filter mask at iterator creation. The filter is used to
307 : // skip whole point-key blocks containing point keys with suffixes greater
308 : // than a covering range-key's suffix.
309 : //
310 : // To use this functionality, the caller must create and configure (through
311 : // Options.BlockPropertyCollectors) a block-property collector that records
312 : // the maximum suffix contained within a block. The caller then must write
313 : // and provide a BlockPropertyFilterMask implementation on that same
314 : // property. See the BlockPropertyFilterMask type for more information.
315 : Filter func() BlockPropertyFilterMask
316 : }
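
// For illustration only (an editorial sketch): enabling range-key masking at
// iterator creation. The suffix encoding below is hypothetical and assumes a
// Comparer whose Compare orders suffixes as required by the inequality above.
//
//	iter, err := db.NewIter(&pebble.IterOptions{
//		KeyTypes: pebble.IterKeyTypePointsAndRanges,
//		RangeKeyMasking: pebble.RangeKeyMasking{
//			Suffix: []byte("@100"), // hypothetical suffix encoding
//		},
//	})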
317 :
318 : // BlockPropertyFilterMask extends the BlockPropertyFilter interface for use
319 : // with range-key masking. Unlike an ordinary block property filter, a
320 : // BlockPropertyFilterMask's filtering criteria is allowed to change when Pebble
321 : // invokes its SetSuffix method.
322 : //
323 : // When a Pebble iterator steps into a range key's bounds and the range key has
324 : // a suffix greater than or equal to RangeKeyMasking.Suffix, the range key acts
325 : // as a mask. The masking range key hides all point keys that fall within the
326 : // range key's bounds and have suffixes > the range key's suffix. Without a
327 : // filter mask configured, Pebble performs this hiding by stepping through point
328 : // keys and comparing suffixes. If large numbers of point keys are masked, this
329 : // requires Pebble to load, iterate through and discard a large number of
330 : // sstable blocks containing masked point keys.
331 : //
332 : // If a block-property collector and a filter mask are configured, Pebble may
333 : // skip loading some point-key blocks altogether. If a block's keys are known to
334 : // all fall within the bounds of the masking range key and the block was
335 : // annotated by a block-property collector with the maximal suffix, Pebble can
336 : // ask the filter mask to compare the property to the current masking range
337 : // key's suffix. If the mask reports no intersection, the block may be skipped.
338 : //
339 : // If unsuffixed and suffixed keys are written to the database, care must be
340 : // taken to avoid unintentionally masking un-suffixed keys located in the same
341 : // block as suffixed keys. One solution is to interpret unsuffixed keys as
342 : // containing the maximal suffix value, ensuring that blocks containing
343 : // unsuffixed keys are always loaded.
344 : type BlockPropertyFilterMask interface {
345 : BlockPropertyFilter
346 :
347 : // SetSuffix configures the mask with the suffix of a range key. The filter
348 : // should return false from Intersects whenever it's provided with a
349 : // property encoding a block's minimum suffix that's greater (according to
350 : // Compare) than the provided suffix.
351 : SetSuffix(suffix []byte) error
352 : }
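
// For illustration only (an editorial sketch): a skeleton mask over a
// hypothetical block property that encodes each block's minimum suffix
// verbatim. The type, name, and encoding are assumptions; a real
// implementation must mirror its paired block-property collector, and the
// concrete BlockPropertyFilter interface may require additional methods.
//
//	type suffixMask struct {
//		cmp    base.Compare
//		suffix []byte
//	}
//
//	func (m *suffixMask) Name() string { return "suffix-mask" } // hypothetical
//
//	func (m *suffixMask) SetSuffix(suffix []byte) error {
//		m.suffix = suffix
//		return nil
//	}
//
//	func (m *suffixMask) Intersects(prop []byte) (bool, error) {
//		if len(m.suffix) == 0 {
//			return true, nil // no mask in effect; never skip blocks
//		}
//		// Skip the block only when its minimum suffix is strictly greater
//		// (according to Compare) than the current masking suffix.
//		return m.cmp(prop, m.suffix) <= 0, nil
//	}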
353 :
354 : // WriteOptions hold the optional per-query parameters for Set and Delete
355 : // operations.
356 : //
357 : // Like Options, a nil *WriteOptions is valid and means to use the default
358 : // values.
359 : type WriteOptions struct {
360 : // Sync is whether to sync writes through the OS buffer cache and down onto
361 : // the actual disk, if applicable. Setting Sync is required for durability of
362 : // individual write operations but can result in slower writes.
363 : //
364 : // If false, and the process or machine crashes, then a recent write may be
365 : // lost. This is due to the recently written data being buffered inside the
366 : // process running Pebble. This differs from the semantics of a write system
367 : // call in which the data is buffered in the OS buffer cache and would thus
368 : // survive a process crash.
369 : //
370 : // The default value is true.
371 : Sync bool
372 : }
373 :
374 : // Sync specifies the default write options for writes which synchronize to
375 : // disk.
376 : var Sync = &WriteOptions{Sync: true}
377 :
378 : // NoSync specifies the default write options for writes which do not
379 : // synchronize to disk.
380 : var NoSync = &WriteOptions{Sync: false}
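
// For illustration only (an editorial sketch, assuming a *pebble.DB named db):
// choosing durability per write.
//
//	// Durable once it returns, at the cost of a slower write.
//	if err := db.Set([]byte("k1"), []byte("v1"), pebble.Sync); err != nil {
//		return err
//	}
//	// Faster, but a process or machine crash may lose this write.
//	if err := db.Set([]byte("k2"), []byte("v2"), pebble.NoSync); err != nil {
//		return err
//	}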
381 :
382 : // GetSync returns the Sync value or true if the receiver is nil.
383 1 : func (o *WriteOptions) GetSync() bool {
384 1 : return o == nil || o.Sync
385 1 : }
386 :
387 : // LevelOptions holds the optional per-level parameters.
388 : type LevelOptions struct {
389 : // BlockRestartInterval is the number of keys between restart points
390 : // for delta encoding of keys.
391 : //
392 : // The default value is 16.
393 : BlockRestartInterval int
394 :
395 : // BlockSize is the target uncompressed size in bytes of each table block.
396 : //
397 : // The default value is 4096.
398 : BlockSize int
399 :
400 : // BlockSizeThreshold finishes a block if the block size is larger than the
401 : // specified percentage of the target block size and adding the next entry
402 : // would cause the block to be larger than the target block size.
403 : //
404 : // The default value is 90.
405 : BlockSizeThreshold int
406 :
407 : // Compression defines the per-block compression to use.
408 : //
409 : // The default value (DefaultCompression) uses snappy compression.
410 : Compression func() Compression
411 :
412 : // FilterPolicy defines a filter algorithm (such as a Bloom filter) that can
413 : // reduce disk reads for Get calls.
414 : //
415 : // One such implementation is bloom.FilterPolicy(10) from the pebble/bloom
416 : // package.
417 : //
418 : // The default value means to use no filter.
419 : FilterPolicy FilterPolicy
420 :
421 : // FilterType defines whether an existing filter policy is applied at a
422 : // block-level or table-level. Block-level filters use less memory to create,
423 : // but are slower to access as a check for the key in the index must first be
424 : // performed to locate the filter block. A table-level filter will require
425 : // memory proportional to the number of keys in an sstable to create, but
426 : // avoids the index lookup when determining if a key is present. Table-level
427 : // filters should be preferred except under constrained memory situations.
428 : FilterType FilterType
429 :
430 : // IndexBlockSize is the target uncompressed size in bytes of each index
431 : // block. When the index block size is larger than this target, two-level
432 : // indexes are automatically enabled. Setting this option to a large value
433 : // (such as math.MaxInt32) disables the automatic creation of two-level
434 : // indexes.
435 : //
436 : // The default value is the value of BlockSize.
437 : IndexBlockSize int
438 :
439 : // The target file size for the level.
440 : TargetFileSize int64
441 : }
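
// For illustration only (an editorial sketch): configuring the first level
// with a table-level bloom filter, using bloom.FilterPolicy(10) from the
// pebble/bloom package mentioned above. Unset fields are filled in by
// EnsureDefaults.
//
//	opts := &pebble.Options{
//		Levels: []pebble.LevelOptions{{
//			BlockSize:    4096,
//			FilterPolicy: bloom.FilterPolicy(10),
//			FilterType:   pebble.TableFilter,
//		}},
//	}
//	opts.EnsureDefaults()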
442 :
443 : // EnsureDefaults ensures that the default values for all of the options have
444 : // been initialized. The receiver must not be nil.
446 1 : func (o *LevelOptions) EnsureDefaults() {
447 1 : if o.BlockRestartInterval <= 0 {
448 1 : o.BlockRestartInterval = base.DefaultBlockRestartInterval
449 1 : }
450 1 : if o.BlockSize <= 0 {
451 1 : o.BlockSize = base.DefaultBlockSize
452 1 : } else if o.BlockSize > sstable.MaximumBlockSize {
453 0 : panic(errors.Errorf("BlockSize %d exceeds MaximumBlockSize", o.BlockSize))
454 : }
455 1 : if o.BlockSizeThreshold <= 0 {
456 1 : o.BlockSizeThreshold = base.DefaultBlockSizeThreshold
457 1 : }
458 1 : if o.Compression == nil {
459 1 : o.Compression = func() Compression { return DefaultCompression }
460 : }
461 1 : if o.IndexBlockSize <= 0 {
462 1 : o.IndexBlockSize = o.BlockSize
463 1 : }
464 1 : if o.TargetFileSize <= 0 {
465 1 : o.TargetFileSize = 2 << 20 // 2 MB
466 1 : }
467 : }
468 :
469 : // Options holds the optional parameters for configuring pebble. These options
470 : // apply to the DB at large; per-query options are defined by the IterOptions
471 : // and WriteOptions types.
472 : type Options struct {
473 : // Sync sstables periodically in order to smooth out writes to disk. This
474 : // option does not provide any persistency guarantee, but is used to avoid
475 : // latency spikes if the OS automatically decides to write out a large chunk
476 : // of dirty filesystem buffers. This option only controls SSTable syncs; WAL
477 : // syncs are controlled by WALBytesPerSync.
478 : //
479 : // The default value is 512KB.
480 : BytesPerSync int
481 :
482 : // Cache is used to cache uncompressed blocks from sstables. If it is nil,
483 : // a block cache of CacheSize will be created for each DB.
484 : Cache *cache.Cache
485 : // CacheSize is used when Cache is not set. The default value is 8 MB.
486 : CacheSize int64
487 :
488 : // LoadBlockSema, if set, is used to limit the number of blocks that can be
489 : // loaded (i.e. read from the filesystem) in parallel. Each load acquires one
490 : // unit from the semaphore for the duration of the read.
491 : LoadBlockSema *fifo.Semaphore
492 :
493 : // Cleaner cleans obsolete files.
494 : //
495 : // The default cleaner uses the DeleteCleaner.
496 : Cleaner Cleaner
497 :
498 : // Local contains options that pertain to files stored on the local filesystem.
499 : Local struct {
500 : // ReadaheadConfig is used to retrieve the current readahead mode; it is
501 : // consulted whenever a read handle is initialized.
502 : ReadaheadConfig *ReadaheadConfig
503 :
504 : // TODO(radu): move BytesPerSync, LoadBlockSema, Cleaner here.
505 : }
506 :
507 : // Comparer defines a total ordering over the space of []byte keys: a 'less
508 : // than' relationship. The same comparison algorithm must be used for reads
509 : // and writes over the lifetime of the DB.
510 : //
511 : // The default value uses the same ordering as bytes.Compare.
512 : Comparer *Comparer
513 :
514 : // DebugCheck is invoked, if non-nil, whenever a new version is being
515 : // installed. Typically, this is set to pebble.DebugCheckLevels in tests
516 : // or tools only, to check invariants over all the data in the database.
517 : DebugCheck func(*DB) error
518 :
519 : // Disable the write-ahead log (WAL). Disabling the write-ahead log prohibits
520 : // crash recovery, but can improve performance if crash recovery is not
521 : // needed (e.g. when only temporary state is being stored in the database).
522 : //
523 : // TODO(peter): untested
524 : DisableWAL bool
525 :
526 : // ErrorIfExists causes an error on Open if the database already exists.
527 : // The error can be checked with errors.Is(err, ErrDBAlreadyExists).
528 : //
529 : // The default value is false.
530 : ErrorIfExists bool
531 :
532 : // ErrorIfNotExists causes an error on Open if the database does not already
533 : // exist. The error can be checked with errors.Is(err, ErrDBDoesNotExist).
534 : //
535 : // The default value is false which will cause a database to be created if it
536 : // does not already exist.
537 : ErrorIfNotExists bool
538 :
539 : // ErrorIfNotPristine causes an error on Open if the database already exists
540 : // and any operations have been performed on the database. The error can be
541 : // checked with errors.Is(err, ErrDBNotPristine).
542 : //
543 : // Note that a database that contained keys that were all subsequently deleted
544 : // may or may not trigger the error. Currently, we check if there are any live
545 : // SSTs or log records to replay.
546 : ErrorIfNotPristine bool
547 :
548 : // EventListener provides hooks to listening to significant DB events such as
549 : // flushes, compactions, and table deletion.
550 : EventListener *EventListener
551 :
552 : // Experimental contains experimental options which are off by default.
553 : // These options are temporary and will eventually either be deleted, moved
554 : // out of the experimental group, or made the non-adjustable default. These
555 : // options may change at any time, so do not rely on them.
556 : Experimental struct {
557 : // The threshold of L0 read-amplification at which compaction concurrency
558 : // is enabled (if CompactionDebtConcurrency was not already exceeded).
559 : // Every multiple of this value enables another concurrent
560 : // compaction up to MaxConcurrentCompactions.
561 : L0CompactionConcurrency int
562 :
563 : // CompactionDebtConcurrency controls the threshold of compaction debt
564 : // at which additional compaction concurrency slots are added. For every
565 : // multiple of this value in compaction debt bytes, an additional
566 : // concurrent compaction is added. This works "on top" of
567 : // L0CompactionConcurrency, so the higher of the count of compaction
568 : // concurrency slots as determined by the two options is chosen.
569 : CompactionDebtConcurrency uint64
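
// Illustrative arithmetic only (editorial note): with the default
// L0CompactionConcurrency of 10 and an L0 read-amplification of 30, roughly
// 30/10 = 3 compaction slots are enabled; with the default
// CompactionDebtConcurrency of 1GB and 4GB of compaction debt, roughly 4.
// The higher count (4) is used, capped at MaxConcurrentCompactions.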
570 :
571 : // IngestSplit, if it returns true, allows for ingest-time splitting of
572 : // existing sstables into two virtual sstables to allow ingestion sstables to
573 : // slot into a lower level than they otherwise would have.
574 : IngestSplit func() bool
575 :
576 : // ReadCompactionRate controls the frequency of read triggered
577 : // compactions by adjusting `AllowedSeeks` in manifest.TableMetadata:
578 : //
579 : // AllowedSeeks = FileSize / ReadCompactionRate
580 : //
581 : // From LevelDB:
582 : // ```
583 : // We arrange to automatically compact this file after
584 : // a certain number of seeks. Let's assume:
585 : // (1) One seek costs 10ms
586 : // (2) Writing or reading 1MB costs 10ms (100MB/s)
587 : // (3) A compaction of 1MB does 25MB of IO:
588 : // 1MB read from this level
589 : // 10-12MB read from next level (boundaries may be misaligned)
590 : // 10-12MB written to next level
591 : // This implies that 25 seeks cost the same as the compaction
592 : // of 1MB of data. I.e., one seek costs approximately the
593 : // same as the compaction of 40KB of data. We are a little
594 : // conservative and allow approximately one seek for every 16KB
595 : // of data before triggering a compaction.
596 : // ```
597 : ReadCompactionRate int64
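
// Illustrative arithmetic only (editorial note): at a rate of 16KB, a 2MB
// sstable (the default L0 target file size) is assigned
// AllowedSeeks = (2 << 20) / (16 << 10) = 128 seeks before it becomes a
// candidate for a read-triggered compaction.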
598 :
599 : // ReadSamplingMultiplier is a multiplier for the readSamplingPeriod in
600 : // iterator.maybeSampleRead() to control the frequency of read sampling
601 : // to trigger a read triggered compaction. A value of -1 prevents sampling
602 : // and disables read triggered compactions. The default is 1 << 4, which
603 : // is multiplied by a constant of 1 << 16 to yield 1 << 20 (1MB).
604 : ReadSamplingMultiplier int64
605 :
606 : // NumDeletionsThreshold defines the minimum number of point tombstones
607 : // that must be present in a single data block for that block to be
608 : // considered tombstone-dense for the purposes of triggering a
609 : // tombstone density compaction. Data blocks may also be considered
610 : // tombstone-dense if they meet the criteria defined by
611 : // DeletionSizeRatioThreshold below. Tombstone-dense blocks are identified
612 : // when sstables are written, and so this is effectively an option for
613 : // sstable writers. The default value is 100.
614 : NumDeletionsThreshold int
615 :
616 : // DeletionSizeRatioThreshold defines the minimum ratio of the size of
617 : // point tombstones to the size of a data block that must be reached
618 : // for that block to be considered tombstone-dense for the purposes of
619 : // triggering a tombstone density compaction. Data blocks may also be
620 : // considered tombstone-dense if they meet the criteria defined by
621 : // NumDeletionsThreshold above. Tombstone-dense blocks are identified
622 : // when sstables are written, and so this is effectively an option for
623 : // sstable writers. The default value is 0.5.
624 : DeletionSizeRatioThreshold float32
625 :
626 : // TombstoneDenseCompactionThreshold is the minimum percent of data
627 : // blocks in a table that must be tombstone-dense for that table to be
628 : // eligible for a tombstone density compaction. It should be defined as a
629 : // ratio out of 1. The default value is 0.10.
630 : //
631 : // If multiple tables are eligible for a tombstone density compaction, then
632 : // tables with a higher percent of tombstone-dense blocks are still
633 : // prioritized for compaction.
634 : //
635 : // A zero or negative value disables tombstone density compactions.
636 : TombstoneDenseCompactionThreshold float64
637 :
638 : // FileCacheShards is the number of shards per file cache.
639 : // Reducing the value can reduce the number of idle goroutines per DB
640 : // instance which can be useful in scenarios with a lot of DB instances
641 : // and a large number of CPUs, but doing so can lead to higher contention
642 : // in the file cache and reduced performance.
643 : //
644 : // The default value is the number of logical CPUs, which can be
645 : // limited by runtime.GOMAXPROCS.
646 : FileCacheShards int
647 :
648 : // ValidateOnIngest schedules validation of sstables after they have
649 : // been ingested.
650 : //
651 : // By default, this value is false.
652 : ValidateOnIngest bool
653 :
654 : // LevelMultiplier configures the size multiplier used to determine the
655 : // desired size of each level of the LSM. Defaults to 10.
656 : LevelMultiplier int
657 :
658 : // MultiLevelCompactionHeuristic determines whether to add an additional
659 : // level to a conventional two level compaction. If nil, a multilevel
660 : // compaction will never get triggered.
661 : MultiLevelCompactionHeuristic MultiLevelHeuristic
662 :
663 : // MaxWriterConcurrency is used to indicate the maximum number of
664 : // compression workers the compression queue is allowed to use. If
665 : // MaxWriterConcurrency > 0, then the Writer will use parallelism to
666 : // compress and write blocks to disk. Otherwise, the writer will
667 : // compress and write blocks to disk synchronously.
668 : MaxWriterConcurrency int
669 :
670 : // ForceWriterParallelism is used to force parallelism in the sstable
671 : // Writer for the metamorphic tests. Even with the MaxWriterConcurrency
672 : // option set, we only enable parallelism in the sstable Writer if there
673 : // is enough CPU available, and this option bypasses that.
674 : ForceWriterParallelism bool
675 :
676 : // CPUWorkPermissionGranter should be set if Pebble should be given the
677 : // ability to optionally schedule additional CPU. See the documentation
678 : // for CPUWorkPermissionGranter for more details.
679 : CPUWorkPermissionGranter CPUWorkPermissionGranter
680 :
681 : // EnableColumnarBlocks is used to decide whether to enable writing
682 : // TableFormatPebblev5 sstables. This setting is only respected by
683 : // FormatColumnarBlocks. In lower format major versions, the
684 : // TableFormatPebblev5 format is prohibited. If EnableColumnarBlocks is
685 : // nil and the DB is at FormatColumnarBlocks, the DB defaults to not
686 : // writing columnar blocks.
687 : EnableColumnarBlocks func() bool
688 :
689 : // EnableValueBlocks is used to decide whether to enable writing
690 : // TableFormatPebblev3 sstables. This setting is only respected by a
691 : // specific subset of format major versions: FormatSSTableValueBlocks,
692 : // FormatFlushableIngest and FormatPrePebblev1MarkedCompacted. In lower
693 : // format major versions, value blocks are never enabled. In higher
694 : // format major versions, value blocks are always enabled.
695 : EnableValueBlocks func() bool
696 :
697 : // ShortAttributeExtractor is used iff EnableValueBlocks() returns true
698 : // (else ignored). If non-nil, a ShortAttribute can be extracted from the
699 : // value and stored with the key, when the value is stored elsewhere.
700 : ShortAttributeExtractor ShortAttributeExtractor
701 :
702 : // RequiredInPlaceValueBound specifies an optional span of user key
703 : // prefixes that are not-MVCC, but have a suffix. For these the values
704 : // must be stored with the key, since the concept of "older versions" is
705 : // not defined. It is also useful for statically known exclusions to value
706 : // separation. In CockroachDB, this will be used for the lock table key
707 : // space that has non-empty suffixes, but those locks don't represent
708 : // actual MVCC versions (the suffix ordering is arbitrary). We will also
709 : // need to add support for dynamically configured exclusions (we want the
710 : // default to be to allow Pebble to decide whether to separate the value
711 : // or not, hence this is structured as exclusions), for example, for users
712 : // of CockroachDB to dynamically exclude certain tables.
713 : //
714 : // Any change in exclusion behavior takes effect only on future written
715 : // sstables, and does not start rewriting existing sstables.
716 : //
717 : // Even ignoring changes in this setting, exclusions are interpreted as a
718 : // guidance by Pebble, and not necessarily honored. Specifically, user
719 : // keys with multiple Pebble-versions *may* have the older versions stored
720 : // in value blocks.
721 : RequiredInPlaceValueBound UserKeyPrefixBound
722 :
723 : // DisableIngestAsFlushable disables lazy ingestion of sstables through
724 : // a WAL write and memtable rotation. Only effectual if the format
725 : // major version is at least `FormatFlushableIngest`.
726 : DisableIngestAsFlushable func() bool
727 :
728 : // RemoteStorage enables use of remote storage (e.g. S3) for storing
729 : // sstables. Setting this option enables use of CreateOnShared option and
730 : // allows ingestion of external files.
731 : RemoteStorage remote.StorageFactory
732 :
733 : // If CreateOnShared is non-zero, new sstables are created on remote storage
734 : // (using CreateOnSharedLocator and with the appropriate
735 : // CreateOnSharedStrategy). These sstables can be shared between different
736 : // Pebble instances; the lifecycle of such objects is managed by the
737 : // remote.Storage constructed by options.RemoteStorage.
738 : //
739 : // Can only be used when RemoteStorage is set (and recognizes
740 : // CreateOnSharedLocator).
741 : CreateOnShared remote.CreateOnSharedStrategy
742 : CreateOnSharedLocator remote.Locator
743 :
744 : // SecondaryCacheSizeBytes is the size of the on-disk block cache for
745 : // objects on shared storage, in bytes. If it is 0, no cache is used.
746 : SecondaryCacheSizeBytes int64
747 :
748 : // EnableDeleteOnlyCompactionExcises enables delete-only compactions to also
749 : // apply delete-only compaction hints on sstables that partially overlap
750 : // with it. This application happens through an excise, similar to
751 : // the excise phase of IngestAndExcise.
752 : EnableDeleteOnlyCompactionExcises func() bool
753 :
754 : // CompactionScheduler, if set, is used to limit concurrent compactions as
755 : // well as to pace compactions already chosen. If nil, a default scheduler
756 : // is created and used.
757 : CompactionScheduler CompactionScheduler
758 :
759 : UserKeyCategories UserKeyCategories
760 : }
761 :
762 : // Filters is a map from filter policy name to filter policy. It is used for
763 : // debugging tools which may be used on multiple databases configured with
764 : // different filter policies. It is not necessary to populate this filters
765 : // map during normal usage of a DB (it will be done automatically by
766 : // EnsureDefaults).
767 : Filters map[string]FilterPolicy
768 :
769 : // FlushDelayDeleteRange configures how long the database should wait before
770 : // forcing a flush of a memtable that contains a range deletion. Disk space
771 : // cannot be reclaimed until the range deletion is flushed. No automatic
772 : // flush occurs if zero.
773 : FlushDelayDeleteRange time.Duration
774 :
775 : // FlushDelayRangeKey configures how long the database should wait before
776 : // forcing a flush of a memtable that contains a range key. Range keys in
777 : // the memtable prevent lazy combined iteration, so it's desirable to flush
778 : // range keys promptly. No automatic flush occurs if zero.
779 : FlushDelayRangeKey time.Duration
780 :
781 : // FlushSplitBytes denotes the target number of bytes per sublevel in
782 : // each flush split interval (i.e. range between two flush split keys)
783 : // in L0 sstables. When set to zero, only a single sstable is generated
784 : // by each flush. When set to a non-zero value, flushes are split at
785 : // points to meet L0's TargetFileSize, any grandparent-related overlap
786 : // options, and at boundary keys of L0 flush split intervals (which are
787 : // targeted to contain around FlushSplitBytes bytes in each sublevel
788 : // between pairs of boundary keys). Splitting sstables during flush
789 : // allows increased compaction flexibility and concurrency when those
790 : // tables are compacted to lower levels.
791 : FlushSplitBytes int64
792 :
793 : // FormatMajorVersion sets the format of on-disk files. It is
794 : // recommended to set the format major version to an explicit
795 : // version, as the default may change over time.
796 : //
797 : // At Open if the existing database is formatted using a later
798 : // format major version that is known to this version of Pebble,
799 : // Pebble will continue to use the later format major version. If
800 : // the existing database's version is unknown, the caller may use
801 : // FormatMostCompatible and will be able to open the database
802 : // regardless of its actual version.
803 : //
804 : // If the existing database is formatted using a format major
805 : // version earlier than the one specified, Open will automatically
806 : // ratchet the database to the specified format major version.
807 : FormatMajorVersion FormatMajorVersion
808 :
809 : // FS provides the interface for persistent file storage.
810 : //
811 : // The default value uses the underlying operating system's file system.
812 : FS vfs.FS
813 :
814 : // KeySchema is the name of the key schema that should be used when writing
815 : // new sstables. There must be a key schema with this name defined in
816 : // KeySchemas. If not set, colblk.DefaultKeySchema is used to construct a
817 : // default key schema.
818 : KeySchema string
819 :
820 : // KeySchemas defines the set of known schemas of user keys. When columnar
821 : // blocks are in use (see FormatColumnarBlocks), the user may specify how a
822 : // key should be decomposed into columns. Each KeySchema must have a unique
823 : // name. The schema named by Options.KeySchema is used while writing
824 : // sstables during flushes and compactions.
825 : //
826 : // Multiple KeySchemas may be used over the lifetime of a database. Once a
827 : // KeySchema is used, it must be provided in KeySchemas in subsequent calls
828 : // to Open for perpetuity.
829 : KeySchemas sstable.KeySchemas
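
// For illustration only (an editorial sketch mirroring what EnsureDefaults
// does when neither field is set, assuming opts.Comparer is non-nil):
//
//	ks := colblk.DefaultKeySchema(opts.Comparer, 16 /* bundleSize */)
//	opts.KeySchema = ks.Name
//	opts.KeySchemas = sstable.MakeKeySchemas(&ks)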
830 :
831 : // Lock, if set, must be a database lock acquired through LockDirectory for
832 : // the same directory passed to Open. If provided, Open will skip locking
833 : // the directory. Closing the database will not release the lock, and it's
834 : // the responsibility of the caller to release the lock after closing the
835 : // database.
836 : //
837 : // Open will enforce that the Lock passed locks the same directory passed to
838 : // Open. Concurrent calls to Open using the same Lock are detected and
839 : // prohibited.
840 : Lock *Lock
841 :
842 : // The count of L0 files necessary to trigger an L0 compaction.
843 : L0CompactionFileThreshold int
844 :
845 : // The amount of L0 read-amplification necessary to trigger an L0 compaction.
846 : L0CompactionThreshold int
847 :
848 : // Hard limit on L0 read-amplification, computed as the number of L0
849 : // sublevels. Writes are stopped when this threshold is reached.
850 : L0StopWritesThreshold int
851 :
852 : // The maximum number of bytes for LBase. The base level is the level which
853 : // L0 is compacted into. The base level is determined dynamically based on
854 : // the existing data in the LSM. The maximum number of bytes for other levels
855 : // is computed dynamically based on the base level's maximum size. When the
856 : // maximum number of bytes for a level is exceeded, compaction is requested.
857 : LBaseMaxBytes int64
858 :
859 : // Per-level options. Options for at least one level must be specified. The
860 : // options for the last level are used for all subsequent levels.
861 : Levels []LevelOptions
862 :
864 : // Logger used to write log messages.
865 : //
866 : // The default logger uses the Go standard library log package.
867 : Logger Logger
868 : // LoggerAndTracer is used for writing log messages and traces. If non-nil,
869 : // it is used in preference to Logger; otherwise Logger is used and tracing
870 : // is a noop.
871 : LoggerAndTracer LoggerAndTracer
872 :
873 : // MaxManifestFileSize is the maximum size the MANIFEST file is allowed to
874 : // become. When the MANIFEST exceeds this size it is rolled over and a new
875 : // MANIFEST is created.
876 : MaxManifestFileSize int64
877 :
878 : // MaxOpenFiles is a soft limit on the number of open files that can be
879 : // used by the DB.
880 : //
881 : // The default value is 1000.
882 : MaxOpenFiles int
883 :
884 : // The size of a MemTable in steady state. The actual MemTable size starts at
885 : // min(256KB, MemTableSize) and doubles for each subsequent MemTable up to
886 : // MemTableSize. This reduces the memory pressure caused by MemTables for
887 : // short lived (test) DB instances. Note that more than one MemTable can be
888 : // in existence since flushing a MemTable involves creating a new one and
889 : // writing the contents of the old one in the
890 : // background. MemTableStopWritesThreshold places a hard limit on the size of
891 : // the queued MemTables.
892 : //
893 : // The default value is 4MB.
894 : MemTableSize uint64
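
// Illustrative arithmetic only (editorial note): with the default 4MB
// MemTableSize, successive memtables are sized 256KB, 512KB, 1MB, 2MB, and
// then 4MB for every memtable thereafter.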
895 :
896 : // Hard limit on the number of queued MemTables. Writes are stopped when
897 : // the sum of the queued memtable sizes exceeds:
898 : // MemTableStopWritesThreshold * MemTableSize.
899 : //
900 : // This value should be at least 2 or writes will stop whenever a MemTable is
901 : // being flushed.
902 : //
903 : // The default value is 2.
904 : MemTableStopWritesThreshold int
905 :
906 : // Merger defines the associative merge operation to use for merging values
907 : // written with {Batch,DB}.Merge.
908 : //
909 : // The default merger concatenates values.
910 : Merger *Merger
911 :
912 : // MaxConcurrentCompactions is the upper bound on the value returned by
913 : // DB.GetAllowedWithoutPermission (reported to the CompactionScheduler).
914 : // More abstractly, it is a rough upper bound on the number of concurrent
915 : // compactions, not including download compactions (which have a separate
916 : // limit specified by MaxConcurrentDownloads).
917 : //
918 : // This is a rough upper bound since delete-only compactions (a) do not use
919 : // the CompactionScheduler, and (b) the CompactionScheduler may use other
920 : // criteria to decide on how many compactions to permit.
921 : //
922 : // Elaborating on (b), when the ConcurrencyLimitScheduler is being used, the
923 : // value returned by DB.GetAllowedWithoutPermission fully controls how many
924 : // compactions get to run. Other CompactionSchedulers may use additional
925 : // criteria, like resource availability.
926 : //
927 : // Elaborating on (a), we don't use the CompactionScheduler to schedule
928 : // delete-only compactions since they are expected to be almost free from a
929 : // CPU and disk usage perspective. Since the CompactionScheduler does not
930 : // know about their existence, the total running count can exceed this
931 : // value. For example, consider MaxConcurrentCompactions returns 3, and the
932 : // current value returned from DB.GetAllowedWithoutPermission is also 3. Say
933 : // 3 delete-only compactions are also running. Then the
934 : // ConcurrencyLimitScheduler can also start 3 other compactions, for a total
935 : // of 6.
936 : //
937 : // DB.GetAllowedWithoutPermission returns a value in the interval [1,
938 : // MaxConcurrentCompactions]. A value > 1 is returned:
939 : // - when L0 read-amplification passes the L0CompactionConcurrency threshold;
940 : // - when compaction debt passes the CompactionDebtConcurrency threshold;
941 : // - when there are multiple manual compactions waiting to run.
942 : //
943 : // MaxConcurrentCompactions() must be greater than 0.
944 : //
945 : // The default value is 1.
946 : MaxConcurrentCompactions func() int
947 :
948 : // MaxConcurrentDownloads specifies the maximum number of download
949 : // compactions. These are compactions that copy an external file to the local
950 : // store.
951 : //
952 : // This limit is independent of MaxConcurrentCompactions; at any point in
953 : // time, we may be running MaxConcurrentCompactions non-download compactions
954 : // and MaxConcurrentDownloads download compactions.
955 : //
956 : // MaxConcurrentDownloads() must be greater than 0.
957 : //
958 : // The default value is 1.
959 : MaxConcurrentDownloads func() int
960 :
961 : // DisableAutomaticCompactions dictates whether automatic compactions are
962 : // scheduled or not. The default is false (enabled). This option is only used
963 : // externally when running a manual compaction, and internally for tests.
964 : DisableAutomaticCompactions bool
965 :
966 : // DisableConsistencyCheck disables the consistency check that is performed on
967 : // open. Should only be used when a database cannot be opened normally (e.g.
968 : // some of the tables don't exist / aren't accessible).
969 : DisableConsistencyCheck bool
970 :
971 : // DisableTableStats dictates whether tables should be loaded asynchronously
972 : // to compute statistics that inform compaction heuristics. The collection
973 : // of table stats improves compaction of tombstones, reclaiming disk space
974 : // more quickly and in some cases reducing write amplification in the
975 : // presence of tombstones. Disabling table stats may be useful in tests
976 : // that require determinism as the asynchronicity of table stats collection
977 : // introduces significant nondeterminism.
978 : DisableTableStats bool
979 :
980 : // NoSyncOnClose decides whether the Pebble instance will enforce a
981 : // close-time synchronization (e.g., fdatasync() or sync_file_range())
982 : // on files it writes to. Setting this to true removes the guarantee for a
983 : // sync on close. Some implementations can still issue a non-blocking sync.
984 : NoSyncOnClose bool
985 :
986 : // NumPrevManifest is the number of non-current or older manifests which
987 : // we want to keep around for debugging purposes. By default, we're going
988 : // to keep one older manifest.
989 : NumPrevManifest int
990 :
991 : // ReadOnly indicates that the DB should be opened in read-only mode. Writes
992 : // to the DB will return an error, background compactions are disabled, and
993 : // the flush that normally occurs after replaying the WAL at startup is
994 : // disabled.
995 : ReadOnly bool
996 :
997 : // FileCache is an initialized FileCache which should be set as an
998 : // option if the DB needs to be initialized with a pre-existing file cache.
999 : // If FileCache is nil, then a file cache which is unique to the DB instance
1000 : // is created. A FileCache can be shared between DB instances by setting it
1001 : // here. The FileCache set here must use the same underlying cache as
1002 : // Options.Cache; Pebble will panic otherwise.
1003 : FileCache *FileCache
1004 :
1005 : // BlockPropertyCollectors is a list of BlockPropertyCollector creation
1006 : // functions. A new BlockPropertyCollector is created for each sstable
1007 : // built and lives for the lifetime of writing that table.
1008 : BlockPropertyCollectors []func() BlockPropertyCollector
1009 :
1010 : // WALBytesPerSync sets the number of bytes to write to a WAL before calling
1011 : // Sync on it in the background. Just like with BytesPerSync above, this
1012 : // helps smooth out disk write latencies, and avoids cases where the OS
1013 : // writes a lot of buffered data to disk at once. However, this is less
1014 : // necessary with WALs, as many write operations already pass in
1015 : // Sync = true.
1016 : //
1017 : // The default value is 0, i.e. no background syncing. This matches the
1018 : // default behaviour in RocksDB.
1019 : WALBytesPerSync int
1020 :
1021 : // WALDir specifies the directory to store write-ahead logs (WALs) in. If
1022 : // empty (the default), WALs will be stored in the same directory as sstables
1023 : // (i.e. the directory passed to pebble.Open).
1024 : WALDir string
1025 :
1026 : // WALFailover may be set to configure Pebble to monitor writes to its
1027 : // write-ahead log and failover to writing write-ahead log entries to a
1028 : // secondary location (eg, a separate physical disk). WALFailover may be
1029 : // used to improve write availability in the presence of transient disk
1030 : // unavailability.
1031 : WALFailover *WALFailoverOptions
1032 :
1033 : // WALRecoveryDirs is a list of additional directories that should be
1034 : // scanned for the existence of additional write-ahead logs. WALRecoveryDirs
1035 : // is expected to be used when starting Pebble with a new WALDir or a new
1036 : // WALFailover configuration. The directories associated with the previous
1037 : // configuration may still contain WALs that are required for recovery of
1038 : // the current database state.
1039 : //
1040 : // If a previous WAL configuration may have stored WALs elsewhere but there
1041 : // is not a corresponding entry in WALRecoveryDirs, Open will error.
1042 : WALRecoveryDirs []wal.Dir
1043 :
1044 : // WALMinSyncInterval is the minimum duration between syncs of the WAL. If
1045 : // WAL syncs are requested faster than this interval, they will be
1046 : // artificially delayed. Introducing a small artificial delay (500us) between
1047 : // WAL syncs can allow more operations to arrive and reduce IO operations
1048 : // while having a minimal impact on throughput. This option is supplied as a
1049 : // closure in order to allow the value to be changed dynamically. The default
1050 : // value is 0.
1051 : //
1052 : // TODO(peter): rather than a closure, should there be another mechanism for
1053 : // changing options dynamically?
1054 : WALMinSyncInterval func() time.Duration
1055 :
1056 : // TargetByteDeletionRate is the rate (in bytes per second) at which sstable file
1057 : // deletions are limited to (under normal circumstances).
1058 : //
1059 : // Deletion pacing is used to slow down deletions when compactions finish up
1060 : // or readers close and newly-obsolete files need cleaning up. Deleting lots
1061 : // of files at once can cause disk latency to go up on some SSDs, which this
1062 : // functionality guards against.
1063 : //
1064 : // This value is only a best-effort target; the effective rate can be
1065 : // higher if deletions are falling behind or disk space is running low.
1066 : //
1067 : // Setting this to 0 disables deletion pacing, which is also the default.
1068 : TargetByteDeletionRate int
1069 :
1070 : // EnableSQLRowSpillMetrics specifies whether the Pebble instance will only be used
1071 : // to temporarily persist data spilled to disk for row-oriented SQL query execution.
1072 : EnableSQLRowSpillMetrics bool
1073 :
1074 : // AllocatorSizeClasses provides a sorted list containing the supported size
1075 : // classes of the underlying memory allocator. This provides hints to the
1076 : // sstable block writer's flushing policy to select block sizes that
1077 : // preemptively reduce internal fragmentation when loaded into the block cache.
1078 : AllocatorSizeClasses []int
1079 :
1080 : // private options are only used by internal tests or are used internally
1081 : // for facilitating upgrade paths of unconfigurable functionality.
1082 : private struct {
1083 : // disableDeleteOnlyCompactions prevents the scheduling of delete-only
1084 : // compactions that drop sstables wholy covered by range tombstones or
1085 : // range key tombstones.
1086 : disableDeleteOnlyCompactions bool
1087 :
1088 : // disableElisionOnlyCompactions prevents the scheduling of elision-only
1089 : // compactions that rewrite sstables in place in order to elide obsolete
1090 : // keys.
1091 : disableElisionOnlyCompactions bool
1092 :
1093 : // disableLazyCombinedIteration is a private option used by the
1094 : // metamorphic tests to test equivalence between lazy-combined iteration
1095 : // and constructing the range-key iterator upfront. It's a private
1096 : // option to avoid littering the public interface with options that we
1097 : // do not want to allow users to actually configure.
1098 : disableLazyCombinedIteration bool
1099 :
1100 : // testingAlwaysWaitForCleanup is set by some tests to force waiting for
1101 : // obsolete file deletion (to make events deterministic).
1102 : testingAlwaysWaitForCleanup bool
1103 :
1104 : // fsCloser holds a closer that should be invoked after a DB using these
1105 : // Options is closed. This is used to automatically stop the
1106 : // long-running goroutine associated with the disk-health-checking FS.
1107 : // See the initialization of FS in EnsureDefaults. Note that care has
1108 : // been taken to ensure that it is still safe to continue using the FS
1109 : // after this closer has been invoked. However, if write operations
1110 : // against the FS are made after the DB is closed, the FS may leak a
1111 : // goroutine indefinitely.
1112 : fsCloser io.Closer
1113 : }
1114 : }
1115 :
1116 : // WALFailoverOptions configures the WAL failover mechanics to use during
1117 : // transient write unavailability on the primary WAL volume.
1118 : type WALFailoverOptions struct {
1119 : // Secondary indicates the secondary directory and VFS to use in the event a
1120 : // write to the primary WAL stalls.
1121 : Secondary wal.Dir
1122 : // FailoverOptions provides configuration of the thresholds and intervals
1123 : // involved in WAL failover. If any of its fields are left unspecified,
1124 : // reasonable defaults will be used.
1125 : wal.FailoverOptions
1126 : }
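
// For illustration only (an editorial sketch): failing over WAL writes to a
// directory on a second physical disk (path hypothetical), accepting the
// default failover thresholds.
//
//	opts.WALFailover = &pebble.WALFailoverOptions{
//		Secondary: wal.Dir{FS: vfs.Default, Dirname: "/mnt/disk2/pebble-wal"},
//	}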
1127 :
1128 : // ReadaheadConfig controls the use of read-ahead.
1129 : type ReadaheadConfig = objstorageprovider.ReadaheadConfig
1130 :
1131 : // JemallocSizeClasses exports sstable.JemallocSizeClasses.
1132 : var JemallocSizeClasses = sstable.JemallocSizeClasses
1133 :
1134 : // DebugCheckLevels calls CheckLevels on the provided database.
1135 : // It may be set in the DebugCheck field of Options to check
1136 : // level invariants whenever a new version is installed.
1137 1 : func DebugCheckLevels(db *DB) error {
1138 1 : return db.CheckLevels(nil)
1139 1 : }
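
// For illustration only (an editorial sketch): enabling level-invariant
// checking in a test configuration.
//
//	opts := &pebble.Options{}
//	opts.DebugCheck = pebble.DebugCheckLevels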
1140 :
1141 : // EnsureDefaults ensures that the default values for all options are set if a
1142 : // valid value was not already specified.
1143 1 : func (o *Options) EnsureDefaults() {
1144 1 : if o.Cache == nil && o.CacheSize == 0 {
1145 1 : o.CacheSize = cacheDefaultSize
1146 1 : }
1147 1 : o.Comparer = o.Comparer.EnsureDefaults()
1148 1 :
1149 1 : if o.BytesPerSync <= 0 {
1150 1 : o.BytesPerSync = 512 << 10 // 512 KB
1151 1 : }
1152 1 : if o.Cleaner == nil {
1153 1 : o.Cleaner = DeleteCleaner{}
1154 1 : }
1155 :
1156 1 : if o.Experimental.DisableIngestAsFlushable == nil {
1157 1 : o.Experimental.DisableIngestAsFlushable = func() bool { return false }
1158 : }
1159 1 : if o.Experimental.L0CompactionConcurrency <= 0 {
1160 1 : o.Experimental.L0CompactionConcurrency = 10
1161 1 : }
1162 1 : if o.Experimental.CompactionDebtConcurrency <= 0 {
1163 1 : o.Experimental.CompactionDebtConcurrency = 1 << 30 // 1 GB
1164 1 : }
1165 1 : if o.KeySchema == "" && len(o.KeySchemas) == 0 {
1166 1 : ks := colblk.DefaultKeySchema(o.Comparer, 16 /* bundleSize */)
1167 1 : o.KeySchema = ks.Name
1168 1 : o.KeySchemas = sstable.MakeKeySchemas(&ks)
1169 1 : }
1170 1 : if o.L0CompactionThreshold <= 0 {
1171 1 : o.L0CompactionThreshold = 4
1172 1 : }
1173 1 : if o.L0CompactionFileThreshold <= 0 {
1174 1 : // Some justification for the default of 500:
1175 1 : // Why not smaller?:
1176 1 : // - The default target file size for L0 is 2MB, so 500 files is <= 1GB
1177 1 : // of data. At observed compaction speeds of > 20MB/s, L0 can be
1178 1 : // cleared of all files in < 1min, so this backlog is not huge.
1179 1 : // - 500 files is low overhead for instantiating L0 sublevels from
1180 1 : // scratch.
1181 1 : // - Lower values were observed to cause excessive and inefficient
1182 1 : // compactions out of L0 in a TPCC import benchmark.
1183 1 : // Why not larger?:
1184 1 : // - More than 1min to compact everything out of L0.
1185 1 : // - CockroachDB's admission control system uses a threshold of 1000
1186 1 : // files to start throttling writes to Pebble. Using 500 here gives
1187 1 : // us headroom between when Pebble should start compacting L0 and
1188 1 : // when the admission control threshold is reached.
1189 1 : //
1190 1 : // We can revisit this default in the future based on better
1191 1 : // experimental understanding.
1192 1 : //
1193 1 : // TODO(jackson): Experiment with slightly lower thresholds [or higher
1194 1 : // admission control thresholds] to see whether a higher L0 score at the
1195 1 : // threshold (currently 2.0) is necessary for some workloads to avoid
1196 1 : // starving L0 in favor of lower-level compactions.
1197 1 : o.L0CompactionFileThreshold = 500
1198 1 : }
1199 1 : if o.L0StopWritesThreshold <= 0 {
1200 1 : o.L0StopWritesThreshold = 12
1201 1 : }
1202 1 : if o.LBaseMaxBytes <= 0 {
1203 1 : o.LBaseMaxBytes = 64 << 20 // 64 MB
1204 1 : }
1205 1 : if o.Levels == nil {
1206 1 : o.Levels = make([]LevelOptions, 1)
1207 1 : for i := range o.Levels {
1208 1 : if i > 0 {
1209 0 : l := &o.Levels[i]
1210 0 : if l.TargetFileSize <= 0 {
1211 0 : l.TargetFileSize = o.Levels[i-1].TargetFileSize * 2
1212 0 : }
1213 : }
1214 1 : o.Levels[i].EnsureDefaults()
1215 : }
1216 1 : } else {
1217 1 : for i := range o.Levels {
1218 1 : o.Levels[i].EnsureDefaults()
1219 1 : }
1220 : }
1221 1 : if o.Logger == nil {
1222 1 : o.Logger = DefaultLogger
1223 1 : }
1224 1 : if o.EventListener == nil {
1225 1 : o.EventListener = &EventListener{}
1226 1 : }
1227 1 : o.EventListener.EnsureDefaults(o.Logger)
1228 1 : if o.MaxManifestFileSize == 0 {
1229 1 : o.MaxManifestFileSize = 128 << 20 // 128 MB
1230 1 : }
1231 1 : if o.MaxOpenFiles == 0 {
1232 1 : o.MaxOpenFiles = 1000
1233 1 : }
1234 1 : if o.MemTableSize <= 0 {
1235 1 : o.MemTableSize = 4 << 20 // 4 MB
1236 1 : }
1237 1 : if o.MemTableStopWritesThreshold <= 0 {
1238 1 : o.MemTableStopWritesThreshold = 2
1239 1 : }
1240 1 : if o.Merger == nil {
1241 1 : o.Merger = DefaultMerger
1242 1 : }
1243 1 : if o.MaxConcurrentCompactions == nil {
1244 1 : o.MaxConcurrentCompactions = func() int { return 1 }
1245 : }
1246 1 : if o.MaxConcurrentDownloads == nil {
1247 1 : o.MaxConcurrentDownloads = func() int { return 1 }
1248 : }
1249 1 : if o.NumPrevManifest <= 0 {
1250 1 : o.NumPrevManifest = 1
1251 1 : }
1252 :
1253 1 : if o.FormatMajorVersion == FormatDefault {
1254 1 : o.FormatMajorVersion = FormatMinSupported
1255 1 : if o.Experimental.CreateOnShared != remote.CreateOnSharedNone {
1256 1 : o.FormatMajorVersion = FormatMinForSharedObjects
1257 1 : }
1258 : }
1259 :
1260 1 : if o.FS == nil {
1261 1 : o.WithFSDefaults()
1262 1 : }
1263 1 : if o.FlushSplitBytes <= 0 {
1264 1 : o.FlushSplitBytes = 2 * o.Levels[0].TargetFileSize
1265 1 : }
1266 1 : if o.WALFailover != nil {
1267 1 : o.WALFailover.FailoverOptions.EnsureDefaults()
1268 1 : }
1269 1 : if o.Experimental.LevelMultiplier <= 0 {
1270 1 : o.Experimental.LevelMultiplier = defaultLevelMultiplier
1271 1 : }
1272 1 : if o.Experimental.ReadCompactionRate == 0 {
1273 1 : o.Experimental.ReadCompactionRate = 16000
1274 1 : }
1275 1 : if o.Experimental.ReadSamplingMultiplier == 0 {
1276 1 : o.Experimental.ReadSamplingMultiplier = 1 << 4
1277 1 : }
1278 1 : if o.Experimental.NumDeletionsThreshold == 0 {
1279 1 : o.Experimental.NumDeletionsThreshold = sstable.DefaultNumDeletionsThreshold
1280 1 : }
1281 1 : if o.Experimental.DeletionSizeRatioThreshold == 0 {
1282 1 : o.Experimental.DeletionSizeRatioThreshold = sstable.DefaultDeletionSizeRatioThreshold
1283 1 : }
1284 1 : if o.Experimental.TombstoneDenseCompactionThreshold == 0 {
1285 1 : o.Experimental.TombstoneDenseCompactionThreshold = 0.10
1286 1 : }
1287 1 : if o.Experimental.FileCacheShards <= 0 {
1288 1 : o.Experimental.FileCacheShards = runtime.GOMAXPROCS(0)
1289 1 : }
1290 1 : if o.Experimental.CPUWorkPermissionGranter == nil {
1291 1 : o.Experimental.CPUWorkPermissionGranter = defaultCPUWorkGranter{}
1292 1 : }
1293 1 : if o.Experimental.MultiLevelCompactionHeuristic == nil {
1294 1 : o.Experimental.MultiLevelCompactionHeuristic = WriteAmpHeuristic{}
1295 1 : }
1296 1 : if o.Experimental.CompactionScheduler == nil {
1297 1 : o.Experimental.CompactionScheduler = newConcurrencyLimitScheduler(defaultTimeSource{})
1298 1 : }
1299 :
1300 1 : o.initMaps()
1301 : }
1302 :
1303 : // DefaultOptions returns a new Options object with the default values set.
1304 1 : func DefaultOptions() *Options {
1305 1 : o := &Options{}
1306 1 : o.EnsureDefaults()
1307 1 : return o
1308 1 : }
1309 :
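// Example (sketch): start from the defaults and override selected fields; the
// values below are illustrative, not recommendations.
//
//	opts := DefaultOptions()
//	opts.CacheSize = 64 << 20 // 64 MB rather than the 8 MB default
//	opts.MemTableSize = 16 << 20
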
1310 : // WithFSDefaults configures the Options to wrap the configured filesystem with
1311 : // the default virtual file system middleware, like disk-health checking.
1312 1 : func (o *Options) WithFSDefaults() {
1313 1 : if o.FS == nil {
1314 1 : o.FS = vfs.Default
1315 1 : }
1316 1 : o.FS, o.private.fsCloser = vfs.WithDiskHealthChecks(o.FS, 5*time.Second, nil,
1317 1 : func(info vfs.DiskSlowInfo) {
1318 0 : o.EventListener.DiskSlow(info)
1319 0 : })
1320 : }
1321 :
1322 : // AddEventListener adds the provided event listener to the Options, in addition
1323 : // to any existing event listener.
1324 1 : func (o *Options) AddEventListener(l EventListener) {
1325 1 : if o.EventListener != nil {
1326 1 : l = TeeEventListener(l, *o.EventListener)
1327 1 : }
1328 1 : o.EventListener = &l
1329 : }
1330 :
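// Example (sketch): layering an additional listener on top of whatever is
// already configured; both listeners observe events via TeeEventListener.
//
//	opts.AddEventListener(EventListener{
//		CompactionEnd: func(info CompactionInfo) {
//			// e.g. record the compaction duration in a metric.
//		},
//	})
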
1331 : // initMaps initializes the Comparers, Filters, and Mergers maps.
1332 1 : func (o *Options) initMaps() {
1333 1 : for i := range o.Levels {
1334 1 : l := &o.Levels[i]
1335 1 : if l.FilterPolicy != nil {
1336 1 : if o.Filters == nil {
1337 1 : o.Filters = make(map[string]FilterPolicy)
1338 1 : }
1339 1 : name := l.FilterPolicy.Name()
1340 1 : if _, ok := o.Filters[name]; !ok {
1341 1 : o.Filters[name] = l.FilterPolicy
1342 1 : }
1343 : }
1344 : }
1345 : }
1346 :
1347 : // Level returns the LevelOptions for the specified level.
1348 1 : func (o *Options) Level(level int) LevelOptions {
1349 1 : if level < len(o.Levels) {
1350 1 : return o.Levels[level]
1351 1 : }
1352 1 : n := len(o.Levels) - 1
1353 1 : l := o.Levels[n]
1354 1 : for i := n; i < level; i++ {
1355 1 : l.TargetFileSize *= 2
1356 1 : }
1357 1 : return l
1358 : }
1359 :
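// For levels beyond those explicitly configured, Level extrapolates by
// doubling TargetFileSize per level. Worked example: with a single configured
// level whose TargetFileSize is 2MB, Level(3) returns a LevelOptions with
// TargetFileSize = 2MB * 2^3 = 16MB.
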
1360 : // Clone creates a shallow copy of the supplied options.
1361 1 : func (o *Options) Clone() *Options {
1362 1 : n := &Options{}
1363 1 : if o != nil {
1364 1 : *n = *o
1365 1 : }
1366 1 : return n
1367 : }
1368 :
1369 1 : func filterPolicyName(p FilterPolicy) string {
1370 1 : if p == nil {
1371 1 : return "none"
1372 1 : }
1373 1 : return p.Name()
1374 : }
1375 :
1376 1 : func (o *Options) String() string {
1377 1 : var buf bytes.Buffer
1378 1 :
1379 1 : cacheSize := o.CacheSize
1380 1 : if o.Cache != nil {
1381 1 : cacheSize = o.Cache.MaxSize()
1382 1 : }
1383 :
1384 1 : fmt.Fprintf(&buf, "[Version]\n")
1385 1 : fmt.Fprintf(&buf, " pebble_version=0.1\n")
1386 1 : fmt.Fprintf(&buf, "\n")
1387 1 : fmt.Fprintf(&buf, "[Options]\n")
1388 1 : fmt.Fprintf(&buf, " bytes_per_sync=%d\n", o.BytesPerSync)
1389 1 : fmt.Fprintf(&buf, " cache_size=%d\n", cacheSize)
1390 1 : fmt.Fprintf(&buf, " cleaner=%s\n", o.Cleaner)
1391 1 : fmt.Fprintf(&buf, " compaction_debt_concurrency=%d\n", o.Experimental.CompactionDebtConcurrency)
1392 1 : fmt.Fprintf(&buf, " comparer=%s\n", o.Comparer.Name)
1393 1 : fmt.Fprintf(&buf, " disable_wal=%t\n", o.DisableWAL)
1394 1 : if o.Experimental.DisableIngestAsFlushable != nil && o.Experimental.DisableIngestAsFlushable() {
1395 1 : fmt.Fprintf(&buf, " disable_ingest_as_flushable=%t\n", true)
1396 1 : }
1397 1 : if o.Experimental.EnableColumnarBlocks != nil && o.Experimental.EnableColumnarBlocks() {
1398 1 : fmt.Fprintf(&buf, " enable_columnar_blocks=%t\n", true)
1399 1 : }
1400 1 : fmt.Fprintf(&buf, " flush_delay_delete_range=%s\n", o.FlushDelayDeleteRange)
1401 1 : fmt.Fprintf(&buf, " flush_delay_range_key=%s\n", o.FlushDelayRangeKey)
1402 1 : fmt.Fprintf(&buf, " flush_split_bytes=%d\n", o.FlushSplitBytes)
1403 1 : fmt.Fprintf(&buf, " format_major_version=%d\n", o.FormatMajorVersion)
1404 1 : fmt.Fprintf(&buf, " key_schema=%s\n", o.KeySchema)
1405 1 : fmt.Fprintf(&buf, " l0_compaction_concurrency=%d\n", o.Experimental.L0CompactionConcurrency)
1406 1 : fmt.Fprintf(&buf, " l0_compaction_file_threshold=%d\n", o.L0CompactionFileThreshold)
1407 1 : fmt.Fprintf(&buf, " l0_compaction_threshold=%d\n", o.L0CompactionThreshold)
1408 1 : fmt.Fprintf(&buf, " l0_stop_writes_threshold=%d\n", o.L0StopWritesThreshold)
1409 1 : fmt.Fprintf(&buf, " lbase_max_bytes=%d\n", o.LBaseMaxBytes)
1410 1 : if o.Experimental.LevelMultiplier != defaultLevelMultiplier {
1411 1 : fmt.Fprintf(&buf, " level_multiplier=%d\n", o.Experimental.LevelMultiplier)
1412 1 : }
1413 1 : fmt.Fprintf(&buf, " max_concurrent_compactions=%d\n", o.MaxConcurrentCompactions())
1414 1 : fmt.Fprintf(&buf, " max_concurrent_downloads=%d\n", o.MaxConcurrentDownloads())
1415 1 : fmt.Fprintf(&buf, " max_manifest_file_size=%d\n", o.MaxManifestFileSize)
1416 1 : fmt.Fprintf(&buf, " max_open_files=%d\n", o.MaxOpenFiles)
1417 1 : fmt.Fprintf(&buf, " mem_table_size=%d\n", o.MemTableSize)
1418 1 : fmt.Fprintf(&buf, " mem_table_stop_writes_threshold=%d\n", o.MemTableStopWritesThreshold)
1419 1 : fmt.Fprintf(&buf, " min_deletion_rate=%d\n", o.TargetByteDeletionRate)
1420 1 : fmt.Fprintf(&buf, " merger=%s\n", o.Merger.Name)
1421 1 : if o.Experimental.MultiLevelCompactionHeuristic != nil {
1422 1 : fmt.Fprintf(&buf, " multilevel_compaction_heuristic=%s\n", o.Experimental.MultiLevelCompactionHeuristic.String())
1423 1 : }
1424 1 : fmt.Fprintf(&buf, " read_compaction_rate=%d\n", o.Experimental.ReadCompactionRate)
1425 1 : fmt.Fprintf(&buf, " read_sampling_multiplier=%d\n", o.Experimental.ReadSamplingMultiplier)
1426 1 : fmt.Fprintf(&buf, " num_deletions_threshold=%d\n", o.Experimental.NumDeletionsThreshold)
1427 1 : fmt.Fprintf(&buf, " deletion_size_ratio_threshold=%f\n", o.Experimental.DeletionSizeRatioThreshold)
1428 1 : fmt.Fprintf(&buf, " tombstone_dense_compaction_threshold=%f\n", o.Experimental.TombstoneDenseCompactionThreshold)
1429 1 : // We no longer care about strict_wal_tail, but set it to true in case an
1430 1 : // older version reads the options.
1431 1 : fmt.Fprintf(&buf, " strict_wal_tail=%t\n", true)
1432 1 : fmt.Fprintf(&buf, " table_cache_shards=%d\n", o.Experimental.FileCacheShards)
1433 1 : fmt.Fprintf(&buf, " validate_on_ingest=%t\n", o.Experimental.ValidateOnIngest)
1434 1 : fmt.Fprintf(&buf, " wal_dir=%s\n", o.WALDir)
1435 1 : fmt.Fprintf(&buf, " wal_bytes_per_sync=%d\n", o.WALBytesPerSync)
1436 1 : fmt.Fprintf(&buf, " max_writer_concurrency=%d\n", o.Experimental.MaxWriterConcurrency)
1437 1 : fmt.Fprintf(&buf, " force_writer_parallelism=%t\n", o.Experimental.ForceWriterParallelism)
1438 1 : fmt.Fprintf(&buf, " secondary_cache_size_bytes=%d\n", o.Experimental.SecondaryCacheSizeBytes)
1439 1 : fmt.Fprintf(&buf, " create_on_shared=%d\n", o.Experimental.CreateOnShared)
1440 1 :
1441 1 : // Private options.
1442 1 : //
1443 1 : // These options are testing-only; they are serialized only when true, so
1444 1 : // that they do not appear in production serialized Options files, while
1445 1 : // still ensuring that the metamorphic tests are able to propagate them to
1446 1 : // subprocesses.
1447 1 : if o.private.disableDeleteOnlyCompactions {
1448 1 : fmt.Fprintln(&buf, " disable_delete_only_compactions=true")
1449 1 : }
1450 1 : if o.private.disableElisionOnlyCompactions {
1451 1 : fmt.Fprintln(&buf, " disable_elision_only_compactions=true")
1452 1 : }
1453 1 : if o.private.disableLazyCombinedIteration {
1454 1 : fmt.Fprintln(&buf, " disable_lazy_combined_iteration=true")
1455 1 : }
1456 :
1457 1 : if o.WALFailover != nil {
1458 1 : unhealthyThreshold, _ := o.WALFailover.FailoverOptions.UnhealthyOperationLatencyThreshold()
1459 1 : fmt.Fprintf(&buf, "\n")
1460 1 : fmt.Fprintf(&buf, "[WAL Failover]\n")
1461 1 : fmt.Fprintf(&buf, " secondary_dir=%s\n", o.WALFailover.Secondary.Dirname)
1462 1 : fmt.Fprintf(&buf, " primary_dir_probe_interval=%s\n", o.WALFailover.FailoverOptions.PrimaryDirProbeInterval)
1463 1 : fmt.Fprintf(&buf, " healthy_probe_latency_threshold=%s\n", o.WALFailover.FailoverOptions.HealthyProbeLatencyThreshold)
1464 1 : fmt.Fprintf(&buf, " healthy_interval=%s\n", o.WALFailover.FailoverOptions.HealthyInterval)
1465 1 : fmt.Fprintf(&buf, " unhealthy_sampling_interval=%s\n", o.WALFailover.FailoverOptions.UnhealthySamplingInterval)
1466 1 : fmt.Fprintf(&buf, " unhealthy_operation_latency_threshold=%s\n", unhealthyThreshold)
1467 1 : fmt.Fprintf(&buf, " elevated_write_stall_threshold_lag=%s\n", o.WALFailover.FailoverOptions.ElevatedWriteStallThresholdLag)
1468 1 : }
1469 :
1470 1 : for i := range o.Levels {
1471 1 : l := &o.Levels[i]
1472 1 : fmt.Fprintf(&buf, "\n")
1473 1 : fmt.Fprintf(&buf, "[Level \"%d\"]\n", i)
1474 1 : fmt.Fprintf(&buf, " block_restart_interval=%d\n", l.BlockRestartInterval)
1475 1 : fmt.Fprintf(&buf, " block_size=%d\n", l.BlockSize)
1476 1 : fmt.Fprintf(&buf, " block_size_threshold=%d\n", l.BlockSizeThreshold)
1477 1 : fmt.Fprintf(&buf, " compression=%s\n", resolveDefaultCompression(l.Compression()))
1478 1 : fmt.Fprintf(&buf, " filter_policy=%s\n", filterPolicyName(l.FilterPolicy))
1479 1 : fmt.Fprintf(&buf, " filter_type=%s\n", l.FilterType)
1480 1 : fmt.Fprintf(&buf, " index_block_size=%d\n", l.IndexBlockSize)
1481 1 : fmt.Fprintf(&buf, " target_file_size=%d\n", l.TargetFileSize)
1482 1 : }
1483 :
1484 1 : return buf.String()
1485 : }
1486 :
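// String emits the same INI-style format that Parse consumes, so simple
// options survive a round-trip (sketch; user-defined comparers, mergers, and
// filters additionally require ParseHooks):
//
//	var rehydrated Options
//	if err := rehydrated.Parse(opts.String(), nil); err != nil {
//		// handle error
//	}
//	rehydrated.EnsureDefaults()
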
1487 : type parseOptionsFuncs struct {
1488 : visitNewSection func(i, j int, section string) error
1489 : visitKeyValue func(i, j int, section, key, value string) error
1490 : visitCommentOrWhitespace func(i, j int, whitespace string) error
1491 : }
1492 :
1493 : // parseOptions takes options serialized by Options.String() and parses them
1494 : // into keys and values. It calls fns.visitNewSection for the beginning of each
1495 : // new section, fns.visitKeyValue for each key-value pair, and
1496 : // fns.visitCommentOrWhitespace for comments and whitespace between key-value pairs.
1497 1 : func parseOptions(s string, fns parseOptionsFuncs) error {
1498 1 : var section, mappedSection string
1499 1 : i := 0
1500 1 : for i < len(s) {
1501 1 : rem := s[i:]
1502 1 : j := strings.IndexByte(rem, '\n')
1503 1 : if j < 0 {
1504 1 : j = len(rem)
1505 1 : } else {
1506 1 : j += 1 // Include the newline.
1507 1 : }
1508 1 : line := strings.TrimSpace(s[i : i+j])
1509 1 : startOff, endOff := i, i+j
1510 1 : i += j
1511 1 :
1512 1 : if len(line) == 0 || line[0] == ';' || line[0] == '#' {
1513 1 : // Skip blank lines and comments.
1514 1 : if fns.visitCommentOrWhitespace != nil {
1515 1 : if err := fns.visitCommentOrWhitespace(startOff, endOff, line); err != nil {
1516 0 : return err
1517 0 : }
1518 : }
1519 1 : continue
1520 : }
1521 1 : n := len(line)
1522 1 : if line[0] == '[' && line[n-1] == ']' {
1523 1 : // Parse section.
1524 1 : section = line[1 : n-1]
1525 1 : // RocksDB uses a similar (INI-style) syntax for the OPTIONS file, but
1526 1 : // different section names and keys. The "CFOptions ..." paths are the
1527 1 : // RocksDB versions which we map to the Pebble paths.
1528 1 : mappedSection = section
1529 1 : if section == `CFOptions "default"` {
1530 1 : mappedSection = "Options"
1531 1 : }
1532 1 : if fns.visitNewSection != nil {
1533 1 : if err := fns.visitNewSection(startOff, endOff, mappedSection); err != nil {
1534 0 : return err
1535 0 : }
1536 : }
1537 1 : continue
1538 : }
1539 :
1540 1 : pos := strings.Index(line, "=")
1541 1 : if pos < 0 {
1542 1 : const maxLen = 50
1543 1 : if len(line) > maxLen {
1544 0 : line = line[:maxLen-3] + "..."
1545 0 : }
1546 1 : return base.CorruptionErrorf("invalid key=value syntax: %q", errors.Safe(line))
1547 : }
1548 :
1549 1 : key := strings.TrimSpace(line[:pos])
1550 1 : value := strings.TrimSpace(line[pos+1:])
1551 1 :
1552 1 : if section == `CFOptions "default"` {
1553 1 : switch key {
1554 1 : case "comparator":
1555 1 : key = "comparer"
1556 1 : case "merge_operator":
1557 1 : key = "merger"
1558 : }
1559 : }
1560 1 : if fns.visitKeyValue != nil {
1561 1 : if err := fns.visitKeyValue(startOff, endOff, mappedSection, key, value); err != nil {
1562 1 : return err
1563 1 : }
1564 : }
1565 : }
1566 1 : return nil
1567 : }
1568 :
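// A minimal sketch of driving the visitor callbacks; `serialized` stands in
// for an OPTIONS file's contents:
//
//	var keys []string
//	err := parseOptions(serialized, parseOptionsFuncs{
//		visitKeyValue: func(i, j int, section, key, value string) error {
//			if section == "Options" {
//				keys = append(keys, key)
//			}
//			return nil
//		},
//	})
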
1569 : // ParseHooks contains callbacks to create options fields which can have
1570 : // user-defined implementations.
1571 : type ParseHooks struct {
1572 : NewCleaner func(name string) (Cleaner, error)
1573 : NewComparer func(name string) (*Comparer, error)
1574 : NewFilterPolicy func(name string) (FilterPolicy, error)
1575 : NewKeySchema func(name string) (KeySchema, error)
1576 : NewMerger func(name string) (*Merger, error)
1577 : SkipUnknown func(name, value string) bool
1578 : }
1579 :
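// Example (sketch): a hook that reconstructs bloom filter policies by name,
// assuming the github.com/cockroachdb/pebble/bloom package:
//
//	hooks := &ParseHooks{
//		NewFilterPolicy: func(name string) (FilterPolicy, error) {
//			if name == "rocksdb.BuiltinBloomFilter" {
//				return bloom.FilterPolicy(10), nil
//			}
//			return nil, errors.Errorf("unknown filter policy: %q", name)
//		},
//	}
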
1580 : // Parse parses the options from the specified string. Note that certain
1581 : // options, such as the comparer and merger, cannot be parsed directly into
1582 : // populated fields; supply ParseHooks to construct such values by name.
1583 1 : func (o *Options) Parse(s string, hooks *ParseHooks) error {
1584 1 : visitKeyValue := func(i, j int, section, key, value string) error {
1585 1 : // WARNING: DO NOT remove entries from the switches below because doing so
1586 1 : // causes a key previously written to the OPTIONS file to be considered unknown,
1587 1 : // a backwards incompatible change. Instead, leave in support for parsing the
1588 1 : // key but simply don't parse the value.
1589 1 :
1590 1 : parseComparer := func(name string) (*Comparer, error) {
1591 1 : switch name {
1592 1 : case DefaultComparer.Name:
1593 1 : return DefaultComparer, nil
1594 1 : case testkeys.Comparer.Name:
1595 1 : return testkeys.Comparer, nil
1596 1 : default:
1597 1 : if hooks != nil && hooks.NewComparer != nil {
1598 1 : return hooks.NewComparer(name)
1599 1 : }
1600 1 : return nil, nil
1601 : }
1602 : }
1603 :
1604 1 : switch {
1605 1 : case section == "Version":
1606 1 : switch key {
1607 1 : case "pebble_version":
1608 0 : default:
1609 0 : if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section+"."+key, value) {
1610 0 : return nil
1611 0 : }
1612 0 : return errors.Errorf("pebble: unknown option: %s.%s",
1613 0 : errors.Safe(section), errors.Safe(key))
1614 : }
1615 1 : return nil
1616 :
1617 1 : case section == "Options":
1618 1 : var err error
1619 1 : switch key {
1620 1 : case "bytes_per_sync":
1621 1 : o.BytesPerSync, err = strconv.Atoi(value)
1622 1 : case "cache_size":
1623 1 : o.CacheSize, err = strconv.ParseInt(value, 10, 64)
1624 1 : case "cleaner":
1625 1 : switch value {
1626 1 : case "archive":
1627 1 : o.Cleaner = ArchiveCleaner{}
1628 1 : case "delete":
1629 1 : o.Cleaner = DeleteCleaner{}
1630 0 : default:
1631 0 : if hooks != nil && hooks.NewCleaner != nil {
1632 0 : o.Cleaner, err = hooks.NewCleaner(value)
1633 0 : }
1634 : }
1635 1 : case "comparer":
1636 1 : var comparer *Comparer
1637 1 : comparer, err = parseComparer(value)
1638 1 : if comparer != nil {
1639 1 : o.Comparer = comparer
1640 1 : }
1641 1 : case "compaction_debt_concurrency":
1642 1 : o.Experimental.CompactionDebtConcurrency, err = strconv.ParseUint(value, 10, 64)
1643 0 : case "delete_range_flush_delay":
1644 0 : // NB: This is a deprecated serialization of the
1645 0 : // `flush_delay_delete_range` option.
1646 0 : o.FlushDelayDeleteRange, err = time.ParseDuration(value)
1647 1 : case "disable_delete_only_compactions":
1648 1 : o.private.disableDeleteOnlyCompactions, err = strconv.ParseBool(value)
1649 1 : case "disable_elision_only_compactions":
1650 1 : o.private.disableElisionOnlyCompactions, err = strconv.ParseBool(value)
1651 1 : case "disable_ingest_as_flushable":
1652 1 : var v bool
1653 1 : v, err = strconv.ParseBool(value)
1654 1 : if err == nil {
1655 1 : o.Experimental.DisableIngestAsFlushable = func() bool { return v }
1656 : }
1657 1 : case "disable_lazy_combined_iteration":
1658 1 : o.private.disableLazyCombinedIteration, err = strconv.ParseBool(value)
1659 1 : case "disable_wal":
1660 1 : o.DisableWAL, err = strconv.ParseBool(value)
1661 1 : case "enable_columnar_blocks":
1662 1 : var v bool
1663 1 : if v, err = strconv.ParseBool(value); err == nil {
1664 1 : o.Experimental.EnableColumnarBlocks = func() bool { return v }
1665 : }
1666 1 : case "flush_delay_delete_range":
1667 1 : o.FlushDelayDeleteRange, err = time.ParseDuration(value)
1668 1 : case "flush_delay_range_key":
1669 1 : o.FlushDelayRangeKey, err = time.ParseDuration(value)
1670 1 : case "flush_split_bytes":
1671 1 : o.FlushSplitBytes, err = strconv.ParseInt(value, 10, 64)
1672 1 : case "format_major_version":
1673 1 : // NB: The version written here may be stale. Open does
1674 1 : // not use the format major version encoded in the
1675 1 : // OPTIONS file beyond validating, right here, that the
1676 1 : // encoded version is valid.
1677 1 : var v uint64
1678 1 : v, err = strconv.ParseUint(value, 10, 64)
1679 1 : if vers := FormatMajorVersion(v); vers > internalFormatNewest || vers == FormatDefault {
1680 0 : err = errors.Newf("unsupported format major version %d", o.FormatMajorVersion)
1681 0 : }
1682 1 : if err == nil {
1683 1 : o.FormatMajorVersion = FormatMajorVersion(v)
1684 1 : }
1685 1 : case "key_schema":
1686 1 : o.KeySchema = value
1687 1 : if o.KeySchemas == nil {
1688 1 : o.KeySchemas = make(map[string]*KeySchema)
1689 1 : }
1690 1 : if _, ok := o.KeySchemas[o.KeySchema]; !ok {
1691 1 : if strings.HasPrefix(value, "DefaultKeySchema(") && strings.HasSuffix(value, ")") {
1692 1 : argsStr := strings.TrimSuffix(strings.TrimPrefix(value, "DefaultKeySchema("), ")")
1693 1 : args := strings.FieldsFunc(argsStr, func(r rune) bool {
1694 1 : return unicode.IsSpace(r) || r == ','
1695 1 : })
1696 1 : var comparer *base.Comparer
1697 1 : var bundleSize int
1698 1 : comparer, err = parseComparer(args[0])
1699 1 : if err == nil {
1700 1 : bundleSize, err = strconv.Atoi(args[1])
1701 1 : }
1702 1 : if err == nil {
1703 1 : schema := colblk.DefaultKeySchema(comparer, bundleSize)
1704 1 : o.KeySchema = schema.Name
1705 1 : o.KeySchemas[o.KeySchema] = &schema
1706 1 : }
1707 0 : } else if hooks != nil && hooks.NewKeySchema != nil {
1708 0 : var schema KeySchema
1709 0 : schema, err = hooks.NewKeySchema(value)
1710 0 : if err == nil {
1711 0 : o.KeySchemas[value] = &schema
1712 0 : }
1713 : }
1714 : }
1715 1 : case "l0_compaction_concurrency":
1716 1 : o.Experimental.L0CompactionConcurrency, err = strconv.Atoi(value)
1717 1 : case "l0_compaction_file_threshold":
1718 1 : o.L0CompactionFileThreshold, err = strconv.Atoi(value)
1719 1 : case "l0_compaction_threshold":
1720 1 : o.L0CompactionThreshold, err = strconv.Atoi(value)
1721 1 : case "l0_stop_writes_threshold":
1722 1 : o.L0StopWritesThreshold, err = strconv.Atoi(value)
1723 0 : case "l0_sublevel_compactions":
1724 : // Do nothing; option existed in older versions of pebble.
1725 1 : case "lbase_max_bytes":
1726 1 : o.LBaseMaxBytes, err = strconv.ParseInt(value, 10, 64)
1727 1 : case "level_multiplier":
1728 1 : o.Experimental.LevelMultiplier, err = strconv.Atoi(value)
1729 1 : case "max_concurrent_compactions":
1730 1 : var concurrentCompactions int
1731 1 : concurrentCompactions, err = strconv.Atoi(value)
1732 1 : if concurrentCompactions <= 0 {
1733 0 : err = errors.New("max_concurrent_compactions cannot be <= 0")
1734 1 : } else {
1735 1 : o.MaxConcurrentCompactions = func() int { return concurrentCompactions }
1736 : }
1737 1 : case "max_concurrent_downloads":
1738 1 : var concurrentDownloads int
1739 1 : concurrentDownloads, err = strconv.Atoi(value)
1740 1 : if concurrentDownloads <= 0 {
1741 0 : err = errors.New("max_concurrent_compactions cannot be <= 0")
1742 1 : } else {
1743 1 : o.MaxConcurrentDownloads = func() int { return concurrentDownloads }
1744 : }
1745 1 : case "max_manifest_file_size":
1746 1 : o.MaxManifestFileSize, err = strconv.ParseInt(value, 10, 64)
1747 1 : case "max_open_files":
1748 1 : o.MaxOpenFiles, err = strconv.Atoi(value)
1749 1 : case "mem_table_size":
1750 1 : o.MemTableSize, err = strconv.ParseUint(value, 10, 64)
1751 1 : case "mem_table_stop_writes_threshold":
1752 1 : o.MemTableStopWritesThreshold, err = strconv.Atoi(value)
1753 0 : case "min_compaction_rate":
1754 : // Do nothing; option existed in older versions of pebble, and
1755 : // may be meaningful again eventually.
1756 1 : case "min_deletion_rate":
1757 1 : o.TargetByteDeletionRate, err = strconv.Atoi(value)
1758 0 : case "min_flush_rate":
1759 : // Do nothing; option existed in older versions of pebble, and
1760 : // may be meaningful again eventually.
1761 1 : case "multilevel_compaction_heuristic":
1762 1 : switch {
1763 1 : case value == "none":
1764 1 : o.Experimental.MultiLevelCompactionHeuristic = NoMultiLevel{}
1765 1 : case strings.HasPrefix(value, "wamp"):
1766 1 : fields := strings.FieldsFunc(strings.TrimPrefix(value, "wamp"), func(r rune) bool {
1767 1 : return unicode.IsSpace(r) || r == ',' || r == '(' || r == ')'
1768 1 : })
1769 1 : if len(fields) != 2 {
1770 0 : err = errors.Newf("require 2 arguments")
1771 0 : }
1772 1 : var h WriteAmpHeuristic
1773 1 : if err == nil {
1774 1 : h.AddPropensity, err = strconv.ParseFloat(fields[0], 64)
1775 1 : }
1776 1 : if err == nil {
1777 1 : h.AllowL0, err = strconv.ParseBool(fields[1])
1778 1 : }
1779 1 : if err == nil {
1780 1 : o.Experimental.MultiLevelCompactionHeuristic = h
1781 1 : } else {
1782 0 : err = errors.Wrapf(err, "unexpected wamp heuristic arguments: %s", value)
1783 0 : }
1784 0 : default:
1785 0 : err = errors.Newf("unrecognized multilevel compaction heuristic: %s", value)
1786 : }
1787 0 : case "point_tombstone_weight":
1788 : // Do nothing; deprecated.
1789 1 : case "strict_wal_tail":
1790 1 : var strictWALTail bool
1791 1 : strictWALTail, err = strconv.ParseBool(value)
1792 1 : if err == nil && !strictWALTail {
1793 0 : err = errors.Newf("reading from versions with strict_wal_tail=false no longer supported")
1794 0 : }
1795 1 : case "merger":
1796 1 : switch value {
1797 0 : case "nullptr":
1798 0 : o.Merger = nil
1799 1 : case "pebble.concatenate":
1800 1 : o.Merger = DefaultMerger
1801 1 : default:
1802 1 : if hooks != nil && hooks.NewMerger != nil {
1803 1 : o.Merger, err = hooks.NewMerger(value)
1804 1 : }
1805 : }
1806 1 : case "read_compaction_rate":
1807 1 : o.Experimental.ReadCompactionRate, err = strconv.ParseInt(value, 10, 64)
1808 1 : case "read_sampling_multiplier":
1809 1 : o.Experimental.ReadSamplingMultiplier, err = strconv.ParseInt(value, 10, 64)
1810 1 : case "num_deletions_threshold":
1811 1 : o.Experimental.NumDeletionsThreshold, err = strconv.Atoi(value)
1812 1 : case "deletion_size_ratio_threshold":
1813 1 : val, parseErr := strconv.ParseFloat(value, 32)
1814 1 : o.Experimental.DeletionSizeRatioThreshold = float32(val)
1815 1 : err = parseErr
1816 1 : case "tombstone_dense_compaction_threshold":
1817 1 : o.Experimental.TombstoneDenseCompactionThreshold, err = strconv.ParseFloat(value, 64)
1818 1 : case "table_cache_shards":
1819 1 : o.Experimental.FileCacheShards, err = strconv.Atoi(value)
1820 0 : case "table_format":
1821 0 : switch value {
1822 0 : case "leveldb":
1823 0 : case "rocksdbv2":
1824 0 : default:
1825 0 : return errors.Errorf("pebble: unknown table format: %q", errors.Safe(value))
1826 : }
1827 1 : case "table_property_collectors":
1828 : // No longer implemented; ignore.
1829 1 : case "validate_on_ingest":
1830 1 : o.Experimental.ValidateOnIngest, err = strconv.ParseBool(value)
1831 1 : case "wal_dir":
1832 1 : o.WALDir = value
1833 1 : case "wal_bytes_per_sync":
1834 1 : o.WALBytesPerSync, err = strconv.Atoi(value)
1835 1 : case "max_writer_concurrency":
1836 1 : o.Experimental.MaxWriterConcurrency, err = strconv.Atoi(value)
1837 1 : case "force_writer_parallelism":
1838 1 : o.Experimental.ForceWriterParallelism, err = strconv.ParseBool(value)
1839 1 : case "secondary_cache_size_bytes":
1840 1 : o.Experimental.SecondaryCacheSizeBytes, err = strconv.ParseInt(value, 10, 64)
1841 1 : case "create_on_shared":
1842 1 : var createOnSharedInt int64
1843 1 : createOnSharedInt, err = strconv.ParseInt(value, 10, 64)
1844 1 : o.Experimental.CreateOnShared = remote.CreateOnSharedStrategy(createOnSharedInt)
1845 0 : default:
1846 0 : if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section+"."+key, value) {
1847 0 : return nil
1848 0 : }
1849 0 : return errors.Errorf("pebble: unknown option: %s.%s",
1850 0 : errors.Safe(section), errors.Safe(key))
1851 : }
1852 1 : return err
1853 :
1854 1 : case section == "WAL Failover":
1855 1 : if o.WALFailover == nil {
1856 1 : o.WALFailover = new(WALFailoverOptions)
1857 1 : }
1858 1 : var err error
1859 1 : switch key {
1860 1 : case "secondary_dir":
1861 1 : o.WALFailover.Secondary = wal.Dir{Dirname: value, FS: vfs.Default}
1862 1 : case "primary_dir_probe_interval":
1863 1 : o.WALFailover.PrimaryDirProbeInterval, err = time.ParseDuration(value)
1864 1 : case "healthy_probe_latency_threshold":
1865 1 : o.WALFailover.HealthyProbeLatencyThreshold, err = time.ParseDuration(value)
1866 1 : case "healthy_interval":
1867 1 : o.WALFailover.HealthyInterval, err = time.ParseDuration(value)
1868 1 : case "unhealthy_sampling_interval":
1869 1 : o.WALFailover.UnhealthySamplingInterval, err = time.ParseDuration(value)
1870 1 : case "unhealthy_operation_latency_threshold":
1871 1 : var threshold time.Duration
1872 1 : threshold, err = time.ParseDuration(value)
1873 1 : o.WALFailover.UnhealthyOperationLatencyThreshold = func() (time.Duration, bool) {
1874 1 : return threshold, true
1875 1 : }
1876 1 : case "elevated_write_stall_threshold_lag":
1877 1 : o.WALFailover.ElevatedWriteStallThresholdLag, err = time.ParseDuration(value)
1878 0 : default:
1879 0 : if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section+"."+key, value) {
1880 0 : return nil
1881 0 : }
1882 0 : return errors.Errorf("pebble: unknown option: %s.%s",
1883 0 : errors.Safe(section), errors.Safe(key))
1884 : }
1885 1 : return err
1886 :
1887 1 : case strings.HasPrefix(section, "Level "):
1888 1 : var index int
1889 1 : if n, err := fmt.Sscanf(section, `Level "%d"`, &index); err != nil {
1890 0 : return err
1891 1 : } else if n != 1 {
1892 0 : if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section, value) {
1893 0 : return nil
1894 0 : }
1895 0 : return errors.Errorf("pebble: unknown section: %q", errors.Safe(section))
1896 : }
1897 :
1898 1 : if len(o.Levels) <= index {
1899 1 : newLevels := make([]LevelOptions, index+1)
1900 1 : copy(newLevels, o.Levels)
1901 1 : o.Levels = newLevels
1902 1 : }
1903 1 : l := &o.Levels[index]
1904 1 :
1905 1 : var err error
1906 1 : switch key {
1907 1 : case "block_restart_interval":
1908 1 : l.BlockRestartInterval, err = strconv.Atoi(value)
1909 1 : case "block_size":
1910 1 : l.BlockSize, err = strconv.Atoi(value)
1911 1 : case "block_size_threshold":
1912 1 : l.BlockSizeThreshold, err = strconv.Atoi(value)
1913 1 : case "compression":
1914 1 : switch value {
1915 0 : case "Default":
1916 0 : l.Compression = func() Compression { return DefaultCompression }
1917 1 : case "NoCompression":
1918 1 : l.Compression = func() Compression { return NoCompression }
1919 1 : case "Snappy":
1920 1 : l.Compression = func() Compression { return SnappyCompression }
1921 1 : case "ZSTD":
1922 1 : l.Compression = func() Compression { return ZstdCompression }
1923 0 : default:
1924 0 : return errors.Errorf("pebble: unknown compression: %q", errors.Safe(value))
1925 : }
1926 1 : case "filter_policy":
1927 1 : if hooks != nil && hooks.NewFilterPolicy != nil {
1928 1 : l.FilterPolicy, err = hooks.NewFilterPolicy(value)
1929 1 : }
1930 1 : case "filter_type":
1931 1 : switch value {
1932 1 : case "table":
1933 1 : l.FilterType = TableFilter
1934 0 : default:
1935 0 : return errors.Errorf("pebble: unknown filter type: %q", errors.Safe(value))
1936 : }
1937 1 : case "index_block_size":
1938 1 : l.IndexBlockSize, err = strconv.Atoi(value)
1939 1 : case "target_file_size":
1940 1 : l.TargetFileSize, err = strconv.ParseInt(value, 10, 64)
1941 0 : default:
1942 0 : if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section+"."+key, value) {
1943 0 : return nil
1944 0 : }
1945 0 : return errors.Errorf("pebble: unknown option: %s.%s", errors.Safe(section), errors.Safe(key))
1946 : }
1947 1 : return err
1948 : }
1949 1 : if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section+"."+key, value) {
1950 1 : return nil
1951 1 : }
1952 0 : return errors.Errorf("pebble: unknown section: %q", errors.Safe(section))
1953 : }
1954 1 : return parseOptions(s, parseOptionsFuncs{
1955 1 : visitKeyValue: visitKeyValue,
1956 1 : })
1957 : }
1958 :
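// Example (sketch): tolerating an OPTIONS file written by a newer version by
// skipping unknown keys rather than erroring out; `serialized` is again a
// placeholder for the file's contents:
//
//	hooks := &ParseHooks{
//		SkipUnknown: func(name, value string) bool { return true },
//	}
//	err := opts.Parse(serialized, hooks)
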
1959 : // ErrMissingWALRecoveryDir is an error returned when opening a database
1960 : // without supplying an Options.WALRecoveryDirs entry for a directory that
1961 : // may contain WALs required to recover a consistent database state.
1962 : type ErrMissingWALRecoveryDir struct {
1963 : Dir string
1964 : }
1965 :
1966 : // Error implements error.
1967 1 : func (e ErrMissingWALRecoveryDir) Error() string {
1968 1 : return fmt.Sprintf("directory %q may contain relevant WALs", e.Dir)
1969 1 : }
1970 :
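// Callers can detect this condition with errors.As and retry after declaring
// the directory (sketch):
//
//	var missing ErrMissingWALRecoveryDir
//	if errors.As(err, &missing) {
//		opts.WALRecoveryDirs = append(opts.WALRecoveryDirs,
//			wal.Dir{FS: vfs.Default, Dirname: missing.Dir})
//	}
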
1971 : // CheckCompatibility verifies the options are compatible with the previous options
1972 : // serialized by Options.String(). For example, the Comparer and Merger must be
1973 : // the same, or the data in the DB will not be readable.
1974 : //
1975 : // This function only looks at specific keys and does not error out if the
1976 : // options are newer and contain unknown keys.
1977 1 : func (o *Options) CheckCompatibility(previousOptions string) error {
1978 1 : visitKeyValue := func(i, j int, section, key, value string) error {
1979 1 : switch section + "." + key {
1980 1 : case "Options.comparer":
1981 1 : if value != o.Comparer.Name {
1982 1 : return errors.Errorf("pebble: comparer name from file %q != comparer name from options %q",
1983 1 : errors.Safe(value), errors.Safe(o.Comparer.Name))
1984 1 : }
1985 1 : case "Options.merger":
1986 1 : // RocksDB allows the merge operator to be unspecified, in which case it
1987 1 : // shows up as "nullptr".
1988 1 : if value != "nullptr" && value != o.Merger.Name {
1989 1 : return errors.Errorf("pebble: merger name from file %q != merger name from options %q",
1990 1 : errors.Safe(value), errors.Safe(o.Merger.Name))
1991 1 : }
1992 1 : case "Options.wal_dir", "WAL Failover.secondary_dir":
1993 1 : switch {
1994 1 : case o.WALDir == value:
1995 1 : return nil
1996 1 : case o.WALFailover != nil && o.WALFailover.Secondary.Dirname == value:
1997 1 : return nil
1998 1 : default:
1999 1 : for _, d := range o.WALRecoveryDirs {
2000 1 : if d.Dirname == value {
2001 1 : return nil
2002 1 : }
2003 : }
2004 1 : return ErrMissingWALRecoveryDir{Dir: value}
2005 : }
2006 : }
2007 1 : return nil
2008 : }
2009 1 : return parseOptions(previousOptions, parseOptionsFuncs{visitKeyValue: visitKeyValue})
2010 : }
2011 :
2012 : // Validate verifies that the options are mutually consistent. For example,
2013 : // L0StopWritesThreshold must be >= L0CompactionThreshold, otherwise a write
2014 : // stall would persist indefinitely.
2015 1 : func (o *Options) Validate() error {
2016 1 : // Note that we can presume Options.EnsureDefaults has been called, so there
2017 1 : // is no need to check for zero values.
2018 1 :
2019 1 : var buf strings.Builder
2020 1 : if o.Experimental.L0CompactionConcurrency < 1 {
2021 1 : fmt.Fprintf(&buf, "L0CompactionConcurrency (%d) must be >= 1\n",
2022 1 : o.Experimental.L0CompactionConcurrency)
2023 1 : }
2024 1 : if o.L0StopWritesThreshold < o.L0CompactionThreshold {
2025 1 : fmt.Fprintf(&buf, "L0StopWritesThreshold (%d) must be >= L0CompactionThreshold (%d)\n",
2026 1 : o.L0StopWritesThreshold, o.L0CompactionThreshold)
2027 1 : }
2028 1 : if uint64(o.MemTableSize) >= maxMemTableSize {
2029 1 : fmt.Fprintf(&buf, "MemTableSize (%s) must be < %s\n",
2030 1 : humanize.Bytes.Uint64(uint64(o.MemTableSize)), humanize.Bytes.Uint64(maxMemTableSize))
2031 1 : }
2032 1 : if o.MemTableStopWritesThreshold < 2 {
2033 1 : fmt.Fprintf(&buf, "MemTableStopWritesThreshold (%d) must be >= 2\n",
2034 1 : o.MemTableStopWritesThreshold)
2035 1 : }
2036 1 : if o.FormatMajorVersion < FormatMinSupported || o.FormatMajorVersion > internalFormatNewest {
2037 0 : fmt.Fprintf(&buf, "FormatMajorVersion (%d) must be between %d and %d\n",
2038 0 : o.FormatMajorVersion, FormatMinSupported, internalFormatNewest)
2039 0 : }
2040 1 : if o.Experimental.CreateOnShared != remote.CreateOnSharedNone && o.FormatMajorVersion < FormatMinForSharedObjects {
2041 0 : fmt.Fprintf(&buf, "FormatMajorVersion (%d) when CreateOnShared is set must be at least %d\n",
2042 0 : o.FormatMajorVersion, FormatMinForSharedObjects)
2043 0 : }
2044 1 : if len(o.KeySchemas) > 0 {
2045 1 : if o.KeySchema == "" {
2046 0 : fmt.Fprintf(&buf, "KeySchemas is set but KeySchema is not\n")
2047 0 : }
2048 1 : if _, ok := o.KeySchemas[o.KeySchema]; !ok {
2049 0 : fmt.Fprintf(&buf, "KeySchema %q not found in KeySchemas\n", o.KeySchema)
2050 0 : }
2051 : }
2052 1 : if buf.Len() == 0 {
2053 1 : return nil
2054 1 : }
2055 1 : return errors.New(buf.String())
2056 : }
2057 :
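// Example (sketch) of a configuration Validate rejects, since a stop-writes
// threshold below the compaction threshold would stall writes indefinitely:
//
//	opts := DefaultOptions()
//	opts.L0CompactionThreshold = 8
//	opts.L0StopWritesThreshold = 4
//	err := opts.Validate() // non-nil
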
2058 : // MakeReaderOptions constructs sstable.ReaderOptions from the corresponding
2059 : // options in the receiver.
2060 1 : func (o *Options) MakeReaderOptions() sstable.ReaderOptions {
2061 1 : var readerOpts sstable.ReaderOptions
2062 1 : if o != nil {
2063 1 : readerOpts.Comparer = o.Comparer
2064 1 : readerOpts.Filters = o.Filters
2065 1 : readerOpts.KeySchemas = o.KeySchemas
2066 1 : readerOpts.LoadBlockSema = o.LoadBlockSema
2067 1 : readerOpts.LoggerAndTracer = o.LoggerAndTracer
2068 1 : readerOpts.Merger = o.Merger
2069 1 : }
2070 1 : return readerOpts
2071 : }
2072 :
2073 : // MakeWriterOptions constructs sstable.WriterOptions for the specified level
2074 : // from the corresponding options in the receiver.
2075 1 : func (o *Options) MakeWriterOptions(level int, format sstable.TableFormat) sstable.WriterOptions {
2076 1 : var writerOpts sstable.WriterOptions
2077 1 : writerOpts.TableFormat = format
2078 1 : if o != nil {
2079 1 : writerOpts.Comparer = o.Comparer
2080 1 : if o.Merger != nil {
2081 1 : writerOpts.MergerName = o.Merger.Name
2082 1 : }
2083 1 : writerOpts.BlockPropertyCollectors = o.BlockPropertyCollectors
2084 : }
2085 1 : if format >= sstable.TableFormatPebblev3 {
2086 1 : writerOpts.ShortAttributeExtractor = o.Experimental.ShortAttributeExtractor
2087 1 : writerOpts.RequiredInPlaceValueBound = o.Experimental.RequiredInPlaceValueBound
2088 1 : if format >= sstable.TableFormatPebblev4 && level == numLevels-1 {
2089 1 : writerOpts.WritingToLowestLevel = true
2090 1 : }
2091 : }
2092 1 : levelOpts := o.Level(level)
2093 1 : writerOpts.BlockRestartInterval = levelOpts.BlockRestartInterval
2094 1 : writerOpts.BlockSize = levelOpts.BlockSize
2095 1 : writerOpts.BlockSizeThreshold = levelOpts.BlockSizeThreshold
2096 1 : writerOpts.Compression = resolveDefaultCompression(levelOpts.Compression())
2097 1 : writerOpts.FilterPolicy = levelOpts.FilterPolicy
2098 1 : writerOpts.FilterType = levelOpts.FilterType
2099 1 : writerOpts.IndexBlockSize = levelOpts.IndexBlockSize
2100 1 : writerOpts.KeySchema = o.KeySchemas[o.KeySchema]
2101 1 : writerOpts.AllocatorSizeClasses = o.AllocatorSizeClasses
2102 1 : writerOpts.NumDeletionsThreshold = o.Experimental.NumDeletionsThreshold
2103 1 : writerOpts.DeletionSizeRatioThreshold = o.Experimental.DeletionSizeRatioThreshold
2104 1 : return writerOpts
2105 : }
2106 :
2107 1 : func resolveDefaultCompression(c Compression) Compression {
2108 1 : if c <= DefaultCompression || c >= block.NCompression {
2109 1 : c = SnappyCompression
2110 1 : }
2111 1 : return c
2112 : }
2113 :
2114 : // UserKeyCategories describes a partitioning of the user key space. Each
2115 : // partition is a category with a name. The categories are used for informative
2116 : // purposes only (like pprof labels). Pebble does not treat keys differently
2117 : // based on the UserKeyCategories.
2118 : //
2119 : // The partitions are defined by their upper bounds. The last partition is
2120 : // assumed to extend to the end of the keyspace; its UpperBound must be nil.
2121 : // The rest of the partitions are ordered by their UpperBound.
2122 : type UserKeyCategories struct {
2123 : categories []UserKeyCategory
2124 : cmp base.Compare
2125 : // rangeNames[i][j] contains the string referring to the categories in the
2126 : // range [i, j], with j > i.
2127 : rangeNames [][]string
2128 : }
2129 :
2130 : // UserKeyCategory describes a partition of the user key space.
2131 : //
2132 : // User keys >= the previous category's UpperBound and < this category's
2133 : // UpperBound are part of this category.
2134 : type UserKeyCategory struct {
2135 : Name string
2136 : // UpperBound is the exclusive upper bound of the category. All user keys >= the
2137 : // previous category's UpperBound and < this UpperBound are part of this
2138 : // category.
2139 : UpperBound []byte
2140 : }
2141 :
2142 : // MakeUserKeyCategories creates a UserKeyCategories object with the given
2143 : // categories. The object is immutable and can be reused across different
2144 : // stores.
2145 1 : func MakeUserKeyCategories(cmp base.Compare, categories ...UserKeyCategory) UserKeyCategories {
2146 1 : n := len(categories)
2147 1 : if n == 0 {
2148 0 : return UserKeyCategories{}
2149 0 : }
2150 1 : if categories[n-1].UpperBound != nil {
2151 0 : panic("last category UpperBound must be nil")
2152 : }
2153 : // Verify that the partitions are ordered as expected.
2154 1 : for i := 1; i < n-1; i++ {
2155 1 : if cmp(categories[i-1].UpperBound, categories[i].UpperBound) >= 0 {
2156 0 : panic("invalid UserKeyCategories: key prefixes must be sorted")
2157 : }
2158 : }
2159 :
2160 : // Precalculate a table of range names to avoid allocations in the
2161 : // categorization path.
2162 1 : rangeNamesBuf := make([]string, n*n)
2163 1 : rangeNames := make([][]string, n)
2164 1 : for i := range rangeNames {
2165 1 : rangeNames[i] = rangeNamesBuf[:n]
2166 1 : rangeNamesBuf = rangeNamesBuf[n:]
2167 1 : for j := i + 1; j < n; j++ {
2168 1 : rangeNames[i][j] = categories[i].Name + "-" + categories[j].Name
2169 1 : }
2170 : }
2171 1 : return UserKeyCategories{
2172 1 : categories: categories,
2173 1 : cmp: cmp,
2174 1 : rangeNames: rangeNames,
2175 1 : }
2176 : }
2177 :
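// Example (sketch): three categories over a bytewise-compared keyspace; the
// final category has a nil UpperBound and covers the remainder of the keys.
//
//	kc := MakeUserKeyCategories(DefaultComparer.Compare,
//		UserKeyCategory{Name: "meta", UpperBound: []byte("b")},
//		UserKeyCategory{Name: "table", UpperBound: []byte("t")},
//		UserKeyCategory{Name: "rest"},
//	)
//	_ = kc.CategorizeKey([]byte("c"))                    // "table"
//	_ = kc.CategorizeKeyRange([]byte("a"), []byte("zz")) // "meta-rest"
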
2178 : // Len returns the number of categories defined.
2179 1 : func (kc *UserKeyCategories) Len() int {
2180 1 : return len(kc.categories)
2181 1 : }
2182 :
2183 : // CategorizeKey returns the name of the category containing the key.
2184 1 : func (kc *UserKeyCategories) CategorizeKey(userKey []byte) string {
2185 1 : idx := sort.Search(len(kc.categories)-1, func(i int) bool {
2186 1 : return kc.cmp(userKey, kc.categories[i].UpperBound) < 0
2187 1 : })
2188 1 : return kc.categories[idx].Name
2189 : }
2190 :
2191 : // CategorizeKeyRange returns the name of the category containing the key range.
2192 : // If the key range spans multiple categories, the result shows the first and
2193 : // last category separated by a dash, e.g. `cat1-cat5`.
2194 1 : func (kc *UserKeyCategories) CategorizeKeyRange(startUserKey, endUserKey []byte) string {
2195 1 : n := len(kc.categories)
2196 1 : p := sort.Search(n-1, func(i int) bool {
2197 1 : return kc.cmp(startUserKey, kc.categories[i].UpperBound) < 0
2198 1 : })
2199 1 : if p == n-1 || kc.cmp(endUserKey, kc.categories[p].UpperBound) < 0 {
2200 1 : // Fast path for a single category.
2201 1 : return kc.categories[p].Name
2202 1 : }
2203 : // Binary search among the remaining categories.
2204 1 : q := p + 1 + sort.Search(n-2-p, func(i int) bool {
2205 1 : return kc.cmp(endUserKey, kc.categories[p+1+i].UpperBound) < 0
2206 1 : })
2207 1 : return kc.rangeNames[p][q]
2208 : }
|