Line data Source code
1 : // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package metamorphic
6 :
7 : import (
8 : "bytes"
9 : "fmt"
10 : "os"
11 : "path/filepath"
12 : "runtime"
13 : "strconv"
14 : "strings"
15 : "time"
16 :
17 : "github.com/cockroachdb/errors"
18 : "github.com/cockroachdb/pebble"
19 : "github.com/cockroachdb/pebble/bloom"
20 : "github.com/cockroachdb/pebble/internal/base"
21 : "github.com/cockroachdb/pebble/internal/cache"
22 : "github.com/cockroachdb/pebble/internal/testkeys"
23 : "github.com/cockroachdb/pebble/objstorage/remote"
24 : "github.com/cockroachdb/pebble/sstable"
25 : "github.com/cockroachdb/pebble/vfs"
26 : "golang.org/x/exp/rand"
27 : )
28 :
const (
	// minimumFormatMajorVersion is the oldest format major version that the
	// metamorphic tests will configure; randomized options never go below it.
	minimumFormatMajorVersion = pebble.FormatMinSupported
	// The format major version to use in the default options configurations. We
	// default to the minimum supported format so we exercise the runtime version
	// ratcheting that a cluster upgrading would experience. The randomized
	// options may still use format major versions that are less than
	// defaultFormatMajorVersion but are at least minimumFormatMajorVersion.
	defaultFormatMajorVersion = pebble.FormatMinSupported
	// newestFormatMajorVersionToTest is the most recent format major version
	// the metamorphic tests should use. This may be greater than
	// pebble.FormatNewest when some format major versions are marked as
	// experimental.
	newestFormatMajorVersionToTest = pebble.FormatNewest
)
43 :
// parseOptions parses OPTIONS-format data into opts. Keys Pebble itself
// understands are handled by opts.Opts.Parse; keys within the [TestOptions]
// stanza are intercepted by the SkipUnknown hook below and applied to the
// metamorphic TestOptions directly. Any remaining TestOptions keys are
// offered to customOptionParsers (keyed without the "TestOptions." prefix),
// which may be nil.
func parseOptions(
	opts *TestOptions, data string, customOptionParsers map[string]func(string) (CustomOption, bool),
) error {
	hooks := &pebble.ParseHooks{
		NewCache:        pebble.NewCache,
		NewFilterPolicy: filterPolicyFromName,
		// SkipUnknown is invoked for each key the Pebble options parser does
		// not recognize. Returning true marks the key as handled; returning
		// false makes Parse fail with an unknown-option error.
		SkipUnknown: func(name, value string) bool {
			switch name {
			case "TestOptions":
				// The stanza header itself; it carries no value.
				return true
			case "TestOptions.strictfs":
				opts.strictFS = true
				opts.Opts.FS = vfs.NewStrictMem()
				return true
			case "TestOptions.ingest_using_apply":
				opts.ingestUsingApply = true
				return true
			case "TestOptions.delete_sized":
				opts.deleteSized = true
				return true
			case "TestOptions.replace_single_delete":
				opts.replaceSingleDelete = true
				return true
			case "TestOptions.use_disk":
				opts.useDisk = true
				opts.Opts.FS = vfs.Default
				return true
			case "TestOptions.initial_state_desc":
				opts.initialStateDesc = value
				return true
			case "TestOptions.initial_state_path":
				opts.initialStatePath = value
				return true
			case "TestOptions.threads":
				v, err := strconv.Atoi(value)
				if err != nil {
					// The options data is produced by this test suite itself,
					// so a malformed value is a programming error.
					panic(err)
				}
				opts.Threads = v
				return true
			case "TestOptions.disable_block_property_collector":
				v, err := strconv.ParseBool(value)
				if err != nil {
					panic(err)
				}
				opts.disableBlockPropertyCollector = v
				if v {
					opts.Opts.BlockPropertyCollectors = nil
				}
				return true
			case "TestOptions.enable_value_blocks":
				opts.enableValueBlocks = true
				opts.Opts.Experimental.EnableValueBlocks = func() bool { return true }
				return true
			case "TestOptions.disable_value_blocks_for_ingest_sstables":
				opts.disableValueBlocksForIngestSSTables = true
				return true
			case "TestOptions.async_apply_to_db":
				opts.asyncApplyToDB = true
				return true
			case "TestOptions.shared_storage_enabled":
				opts.sharedStorageEnabled = true
				sharedStorage := remote.NewInMem()
				opts.Opts.Experimental.RemoteStorage = remote.MakeSimpleFactory(map[remote.Locator]remote.Storage{
					"": sharedStorage,
				})
				opts.sharedStorageFS = sharedStorage
				// Respect a CreateOnShared level the [Options] section may
				// already have set; otherwise default to creating all files
				// on shared storage.
				if opts.Opts.Experimental.CreateOnShared == remote.CreateOnSharedNone {
					opts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedAll
				}
				return true
			case "TestOptions.secondary_cache_enabled":
				opts.secondaryCacheEnabled = true
				opts.Opts.Experimental.SecondaryCacheSizeBytes = 1024 * 1024 * 32 // 32 MBs
				return true
			case "TestOptions.seed_efos":
				v, err := strconv.ParseUint(value, 10, 64)
				if err != nil {
					panic(err)
				}
				opts.seedEFOS = v
				return true
			case "TestOptions.ingest_split":
				opts.ingestSplit = true
				opts.Opts.Experimental.IngestSplit = func() bool {
					return true
				}
				return true
			case "TestOptions.use_shared_replicate":
				opts.useSharedReplicate = true
				return true
			case "TestOptions.use_excise":
				opts.useExcise = true
				return true
			case "TestOptions.efos_always_creates_iterators":
				opts.efosAlwaysCreatesIters = true
				opts.Opts.TestingAlwaysCreateEFOSIterators(true /* value */)
				return true
			default:
				if customOptionParsers == nil {
					return false
				}
				name = strings.TrimPrefix(name, "TestOptions.")
				if p, ok := customOptionParsers[name]; ok {
					if customOpt, ok := p(value); ok {
						opts.CustomOpts = append(opts.CustomOpts, customOpt)
						return true
					}
				}
				return false
			}
		},
	}
	err := opts.Opts.Parse(data, hooks)
	// EnsureDefaults runs even if Parse failed, so the returned options are
	// always in a usable state.
	opts.Opts.EnsureDefaults()
	return err
}
161 :
162 0 : func optionsToString(opts *TestOptions) string {
163 0 : var buf bytes.Buffer
164 0 : if opts.strictFS {
165 0 : fmt.Fprint(&buf, " strictfs=true\n")
166 0 : }
167 0 : if opts.ingestUsingApply {
168 0 : fmt.Fprint(&buf, " ingest_using_apply=true\n")
169 0 : }
170 0 : if opts.deleteSized {
171 0 : fmt.Fprint(&buf, " delete_sized=true\n")
172 0 : }
173 0 : if opts.replaceSingleDelete {
174 0 : fmt.Fprint(&buf, " replace_single_delete=true\n")
175 0 : }
176 0 : if opts.useDisk {
177 0 : fmt.Fprint(&buf, " use_disk=true\n")
178 0 : }
179 0 : if opts.initialStatePath != "" {
180 0 : fmt.Fprintf(&buf, " initial_state_path=%s\n", opts.initialStatePath)
181 0 : }
182 0 : if opts.initialStateDesc != "" {
183 0 : fmt.Fprintf(&buf, " initial_state_desc=%s\n", opts.initialStateDesc)
184 0 : }
185 0 : if opts.Threads != 0 {
186 0 : fmt.Fprintf(&buf, " threads=%d\n", opts.Threads)
187 0 : }
188 0 : if opts.disableBlockPropertyCollector {
189 0 : fmt.Fprintf(&buf, " disable_block_property_collector=%t\n", opts.disableBlockPropertyCollector)
190 0 : }
191 0 : if opts.enableValueBlocks {
192 0 : fmt.Fprintf(&buf, " enable_value_blocks=%t\n", opts.enableValueBlocks)
193 0 : }
194 0 : if opts.disableValueBlocksForIngestSSTables {
195 0 : fmt.Fprintf(&buf, " disable_value_blocks_for_ingest_sstables=%t\n", opts.disableValueBlocksForIngestSSTables)
196 0 : }
197 0 : if opts.asyncApplyToDB {
198 0 : fmt.Fprint(&buf, " async_apply_to_db=true\n")
199 0 : }
200 0 : if opts.sharedStorageEnabled {
201 0 : fmt.Fprint(&buf, " shared_storage_enabled=true\n")
202 0 : }
203 0 : if opts.secondaryCacheEnabled {
204 0 : fmt.Fprint(&buf, " secondary_cache_enabled=true\n")
205 0 : }
206 0 : if opts.seedEFOS != 0 {
207 0 : fmt.Fprintf(&buf, " seed_efos=%d\n", opts.seedEFOS)
208 0 : }
209 0 : if opts.ingestSplit {
210 0 : fmt.Fprintf(&buf, " ingest_split=%v\n", opts.ingestSplit)
211 0 : }
212 0 : if opts.useSharedReplicate {
213 0 : fmt.Fprintf(&buf, " use_shared_replicate=%v\n", opts.useSharedReplicate)
214 0 : }
215 0 : if opts.useExcise {
216 0 : fmt.Fprintf(&buf, " use_excise=%v\n", opts.useExcise)
217 0 : }
218 0 : if opts.efosAlwaysCreatesIters {
219 0 : fmt.Fprintf(&buf, " efos_always_creates_iterators=%v\n", opts.efosAlwaysCreatesIters)
220 0 : }
221 0 : for _, customOpt := range opts.CustomOpts {
222 0 : fmt.Fprintf(&buf, " %s=%s\n", customOpt.Name(), customOpt.Value())
223 0 : }
224 :
225 0 : s := opts.Opts.String()
226 0 : if buf.Len() == 0 {
227 0 : return s
228 0 : }
229 0 : return s + "\n[TestOptions]\n" + buf.String()
230 : }
231 :
232 1 : func defaultTestOptions() *TestOptions {
233 1 : return &TestOptions{
234 1 : Opts: defaultOptions(),
235 1 : Threads: 16,
236 1 : RetryPolicy: NeverRetry,
237 1 : }
238 1 : }
239 :
240 1 : func defaultOptions() *pebble.Options {
241 1 : opts := &pebble.Options{
242 1 : // Use an archive cleaner to ease post-mortem debugging.
243 1 : Cleaner: base.ArchiveCleaner{},
244 1 : // Always use our custom comparer which provides a Split method,
245 1 : // splitting keys at the trailing '@'.
246 1 : Comparer: testkeys.Comparer,
247 1 : DebugCheck: pebble.DebugCheckLevels,
248 1 : FS: vfs.NewMem(),
249 1 : FormatMajorVersion: defaultFormatMajorVersion,
250 1 : Levels: []pebble.LevelOptions{{
251 1 : FilterPolicy: bloom.FilterPolicy(10),
252 1 : }},
253 1 : BlockPropertyCollectors: blockPropertyCollectorConstructors,
254 1 : }
255 1 : return opts
256 1 : }
257 :
// TestOptions describes the options configuring an individual run of the
// metamorphic tests.
type TestOptions struct {
	// Opts holds the *pebble.Options for the test.
	Opts *pebble.Options
	// Threads configures the parallelism of the test. Each thread will run in
	// an independent goroutine and be responsible for executing operations
	// against an independent set of objects. The outcome of any individual
	// operation will still be deterministic, with the metamorphic test
	// inserting synchronization where necessary.
	Threads int
	// RetryPolicy configures which errors should be retried.
	RetryPolicy RetryPolicy
	// CustomOpts holds custom test options that are defined outside of this
	// package.
	CustomOpts []CustomOption

	// internal

	// Use the real disk-backed filesystem (vfs.Default) rather than MemFS.
	useDisk bool
	// Use a strict in-memory filesystem that drops unsynced writes on "crash".
	strictFS bool
	// Use Batch.Apply rather than DB.Ingest.
	ingestUsingApply bool
	// Use Batch.DeleteSized rather than Batch.Delete.
	deleteSized bool
	// Replace a SINGLEDEL with a DELETE.
	replaceSingleDelete bool
	// The path on the local filesystem where the initial state of the database
	// exists. Empty if the test run begins from an empty database state.
	initialStatePath string
	// A human-readable string describing the initial state of the database.
	// Empty if the test run begins from an empty database state.
	initialStateDesc string
	// Disable the block property collector, which may be used by block property
	// filters.
	disableBlockPropertyCollector bool
	// Enable the use of value blocks.
	enableValueBlocks bool
	// Disables value blocks in the sstables written for ingest.
	disableValueBlocksForIngestSSTables bool
	// Use DB.ApplyNoSyncWait for applies that want to sync the WAL.
	asyncApplyToDB bool
	// Enable the use of shared storage.
	sharedStorageEnabled bool
	// sharedStorageFS stores the remote.Storage that is being used with shared
	// storage.
	sharedStorageFS remote.Storage
	// Enables the use of shared replication in TestOptions.
	useSharedReplicate bool
	// Enable the secondary cache. Only effective if sharedStorageEnabled is
	// also true.
	secondaryCacheEnabled bool
	// If nonzero, enables the use of EventuallyFileOnlySnapshots for
	// newSnapshotOps that are keyspan-bounded. The set of which newSnapshotOps
	// are actually created as EventuallyFileOnlySnapshots is deterministically
	// derived from the seed and the operation index.
	seedEFOS uint64
	// Enables ingest splits. Saved here for serialization as Options does not
	// serialize this.
	ingestSplit bool
	// Enables operations that do excises. Note that a false value for this does
	// not guarantee the lack of excises, as useSharedReplicate can also cause
	// excises. However !useExcise && !useSharedReplicate can be used to guarantee
	// lack of excises.
	useExcise bool
	// Enables EFOS to always create iterators, even if a conflicting excise
	// happens. Used to guarantee EFOS determinism when conflicting excises are
	// in play. If false, EFOS determinism is maintained by having the DB do a
	// flush after every new EFOS.
	efosAlwaysCreatesIters bool
}
329 :
// CustomOption defines a custom option that configures the behavior of an
// individual test run. Like all test options, custom options are serialized to
// the OPTIONS file even if they're not options ordinarily understood by Pebble.
type CustomOption interface {
	// Name returns the name of the custom option. This is the key under which
	// the option appears in the OPTIONS file, within the [TestOptions] stanza.
	Name() string
	// Value returns the value of the custom option, serialized as it should
	// appear within the OPTIONS file.
	Value() string
	// Close is run after the test database has been closed at the end of the
	// test as well as during restart operations within the test sequence. It's
	// passed a copy of the *pebble.Options. If the custom options hold on to
	// any resources outside, Close should release them.
	Close(*pebble.Options) error
	// Open is run before the test runs and during a restart operation after the
	// test database has been closed and Close has been called. It's passed a
	// copy of the *pebble.Options. If the custom options must acquire any
	// resources before the test continues, it should reacquire them.
	Open(*pebble.Options) error

	// TODO(jackson): provide additional hooks for custom options changing the
	// behavior of a run.
}
354 :
// standardOptions returns a fixed suite of hand-picked configurations, each
// stressing one particular knob (tiny cache, no WAL, aggressive compaction
// thresholds, etc.). The metamorphic test runs these alongside randomly
// generated configurations.
func standardOptions() []*TestOptions {
	// The index labels are not strictly necessary, but they make it easier to
	// find which options correspond to a failure.
	stdOpts := []string{
		0: "", // default options
		1: `
[Options]
  cache_size=1
`,
		2: `
[Options]
  disable_wal=true
`,
		3: `
[Options]
  l0_compaction_threshold=1
`,
		4: `
[Options]
  l0_compaction_threshold=1
  l0_stop_writes_threshold=1
`,
		5: `
[Options]
  lbase_max_bytes=1
`,
		6: `
[Options]
  max_manifest_file_size=1
`,
		7: `
[Options]
  max_open_files=1
`,
		8: `
[Options]
  mem_table_size=2000
`,
		9: `
[Options]
  mem_table_stop_writes_threshold=2
`,
		10: `
[Options]
  wal_dir=data/wal
`,
		11: `
[Level "0"]
  block_restart_interval=1
`,
		12: `
[Level "0"]
  block_size=1
`,
		13: `
[Level "0"]
  compression=NoCompression
`,
		14: `
[Level "0"]
  index_block_size=1
`,
		15: `
[Level "0"]
  target_file_size=1
`,
		16: `
[Level "0"]
  filter_policy=none
`,
		// 1GB
		17: `
[Options]
  bytes_per_sync=1073741824
[TestOptions]
  strictfs=true
`,
		18: `
[Options]
  max_concurrent_compactions=2
`,
		19: `
[TestOptions]
  ingest_using_apply=true
`,
		20: `
[TestOptions]
  replace_single_delete=true
`,
		21: `
[TestOptions]
  use_disk=true
`,
		22: `
[Options]
  max_writer_concurrency=2
  force_writer_parallelism=true
`,
		23: `
[TestOptions]
  disable_block_property_collector=true
`,
		24: `
[TestOptions]
  threads=1
`,
		25: `
[TestOptions]
  enable_value_blocks=true
`,
		26: fmt.Sprintf(`
[Options]
  format_major_version=%s
`, newestFormatMajorVersionToTest),
		27: fmt.Sprintf(`
[Options]
  format_major_version=%s
[TestOptions]
  shared_storage_enabled=true
  secondary_cache_enabled=true
`, pebble.FormatMinForSharedObjects),
	}

	opts := make([]*TestOptions, len(stdOpts))
	for i := range opts {
		opts[i] = defaultTestOptions()
		// NB: The standard options by definition can never include custom
		// options, so no need to propagate custom option parsers.
		if err := parseOptions(opts[i], stdOpts[i], nil /* custom option parsers */); err != nil {
			panic(err)
		}
	}
	return opts
}
489 :
490 : // RandomOptions generates a random set of operations, drawing randomness from
491 : // rng.
492 : func RandomOptions(
493 : rng *rand.Rand, customOptionParsers map[string]func(string) (CustomOption, bool),
494 0 : ) *TestOptions {
495 0 : testOpts := defaultTestOptions()
496 0 : opts := testOpts.Opts
497 0 :
498 0 : // There are some private options, which we don't want users to fiddle with.
499 0 : // There's no way to set it through the public interface. The only method is
500 0 : // through Parse.
501 0 : {
502 0 : var privateOpts bytes.Buffer
503 0 : fmt.Fprintln(&privateOpts, `[Options]`)
504 0 : if rng.Intn(3) == 0 /* 33% */ {
505 0 : fmt.Fprintln(&privateOpts, ` disable_delete_only_compactions=true`)
506 0 : }
507 0 : if rng.Intn(3) == 0 /* 33% */ {
508 0 : fmt.Fprintln(&privateOpts, ` disable_elision_only_compactions=true`)
509 0 : }
510 0 : if rng.Intn(5) == 0 /* 20% */ {
511 0 : fmt.Fprintln(&privateOpts, ` disable_lazy_combined_iteration=true`)
512 0 : }
513 0 : if privateOptsStr := privateOpts.String(); privateOptsStr != `[Options]\n` {
514 0 : parseOptions(testOpts, privateOptsStr, customOptionParsers)
515 0 : }
516 : }
517 :
518 0 : opts.BytesPerSync = 1 << uint(rng.Intn(28)) // 1B - 256MB
519 0 : opts.Cache = cache.New(1 << uint(rng.Intn(30))) // 1B - 1GB
520 0 : opts.DisableWAL = rng.Intn(2) == 0
521 0 : opts.FlushDelayDeleteRange = time.Millisecond * time.Duration(5*rng.Intn(245)) // 5-250ms
522 0 : opts.FlushDelayRangeKey = time.Millisecond * time.Duration(5*rng.Intn(245)) // 5-250ms
523 0 : opts.FlushSplitBytes = 1 << rng.Intn(20) // 1B - 1MB
524 0 : opts.FormatMajorVersion = minimumFormatMajorVersion
525 0 : n := int(newestFormatMajorVersionToTest - opts.FormatMajorVersion)
526 0 : opts.FormatMajorVersion += pebble.FormatMajorVersion(rng.Intn(n + 1))
527 0 : opts.Experimental.L0CompactionConcurrency = 1 + rng.Intn(4) // 1-4
528 0 : opts.Experimental.LevelMultiplier = 5 << rng.Intn(7) // 5 - 320
529 0 : opts.TargetByteDeletionRate = 1 << uint(20+rng.Intn(10)) // 1MB - 1GB
530 0 : opts.Experimental.ValidateOnIngest = rng.Intn(2) != 0
531 0 : opts.L0CompactionThreshold = 1 + rng.Intn(100) // 1 - 100
532 0 : opts.L0CompactionFileThreshold = 1 << rng.Intn(11) // 1 - 1024
533 0 : opts.L0StopWritesThreshold = 1 + rng.Intn(100) // 1 - 100
534 0 : if opts.L0StopWritesThreshold < opts.L0CompactionThreshold {
535 0 : opts.L0StopWritesThreshold = opts.L0CompactionThreshold
536 0 : }
537 0 : opts.LBaseMaxBytes = 1 << uint(rng.Intn(30)) // 1B - 1GB
538 0 : maxConcurrentCompactions := rng.Intn(3) + 1 // 1-3
539 0 : opts.MaxConcurrentCompactions = func() int {
540 0 : return maxConcurrentCompactions
541 0 : }
542 0 : opts.MaxManifestFileSize = 1 << uint(rng.Intn(30)) // 1B - 1GB
543 0 : opts.MemTableSize = 2 << (10 + uint(rng.Intn(16))) // 2KB - 256MB
544 0 : opts.MemTableStopWritesThreshold = 2 + rng.Intn(5) // 2 - 5
545 0 : if rng.Intn(2) == 0 {
546 0 : opts.WALDir = "data/wal"
547 0 : }
548 0 : if rng.Intn(4) == 0 {
549 0 : // Enable Writer parallelism for 25% of the random options. Setting
550 0 : // MaxWriterConcurrency to any value greater than or equal to 1 has the
551 0 : // same effect currently.
552 0 : opts.Experimental.MaxWriterConcurrency = 2
553 0 : opts.Experimental.ForceWriterParallelism = true
554 0 : }
555 0 : if rng.Intn(2) == 0 {
556 0 : opts.Experimental.DisableIngestAsFlushable = func() bool { return true }
557 : }
558 :
559 : // We either use no multilevel compactions, multilevel compactions with the
560 : // default (zero) additional propensity, or multilevel compactions with an
561 : // additional propensity to encourage more multilevel compactions than we
562 : // ohterwise would.
563 0 : switch rng.Intn(3) {
564 0 : case 0:
565 0 : opts.Experimental.MultiLevelCompactionHeuristic = pebble.NoMultiLevel{}
566 0 : case 1:
567 0 : opts.Experimental.MultiLevelCompactionHeuristic = pebble.WriteAmpHeuristic{}
568 0 : default:
569 0 : opts.Experimental.MultiLevelCompactionHeuristic = pebble.WriteAmpHeuristic{
570 0 : AddPropensity: rng.Float64() * float64(rng.Intn(3)), // [0,3.0)
571 0 : AllowL0: rng.Intn(4) == 1, // 25% of the time
572 0 : }
573 : }
574 :
575 0 : var lopts pebble.LevelOptions
576 0 : lopts.BlockRestartInterval = 1 + rng.Intn(64) // 1 - 64
577 0 : lopts.BlockSize = 1 << uint(rng.Intn(24)) // 1 - 16MB
578 0 : lopts.BlockSizeThreshold = 50 + rng.Intn(50) // 50 - 100
579 0 : lopts.IndexBlockSize = 1 << uint(rng.Intn(24)) // 1 - 16MB
580 0 : lopts.TargetFileSize = 1 << uint(rng.Intn(28)) // 1 - 256MB
581 0 :
582 0 : // We either use no bloom filter, the default filter, or a filter with
583 0 : // randomized bits-per-key setting. We zero out the Filters map. It'll get
584 0 : // repopulated on EnsureDefaults accordingly.
585 0 : opts.Filters = nil
586 0 : switch rng.Intn(3) {
587 0 : case 0:
588 0 : lopts.FilterPolicy = nil
589 0 : case 1:
590 0 : lopts.FilterPolicy = bloom.FilterPolicy(10)
591 0 : default:
592 0 : lopts.FilterPolicy = newTestingFilterPolicy(1 << rng.Intn(5))
593 : }
594 :
595 : // We use either no compression, snappy compression or zstd compression.
596 0 : switch rng.Intn(3) {
597 0 : case 0:
598 0 : lopts.Compression = pebble.NoCompression
599 0 : case 1:
600 0 : lopts.Compression = pebble.ZstdCompression
601 0 : default:
602 0 : lopts.Compression = pebble.SnappyCompression
603 : }
604 0 : opts.Levels = []pebble.LevelOptions{lopts}
605 0 :
606 0 : // Explicitly disable disk-backed FS's for the random configurations. The
607 0 : // single standard test configuration that uses a disk-backed FS is
608 0 : // sufficient.
609 0 : testOpts.useDisk = false
610 0 : testOpts.strictFS = rng.Intn(2) != 0 // Only relevant for MemFS.
611 0 : testOpts.Threads = rng.Intn(runtime.GOMAXPROCS(0)) + 1
612 0 : if testOpts.strictFS {
613 0 : opts.DisableWAL = false
614 0 : opts.FS = vfs.NewStrictMem()
615 0 : } else if !testOpts.useDisk {
616 0 : opts.FS = vfs.NewMem()
617 0 : }
618 0 : testOpts.ingestUsingApply = rng.Intn(2) != 0
619 0 : testOpts.deleteSized = rng.Intn(2) != 0
620 0 : testOpts.replaceSingleDelete = rng.Intn(2) != 0
621 0 : testOpts.disableBlockPropertyCollector = rng.Intn(2) == 1
622 0 : if testOpts.disableBlockPropertyCollector {
623 0 : testOpts.Opts.BlockPropertyCollectors = nil
624 0 : }
625 0 : testOpts.enableValueBlocks = rng.Intn(2) != 0
626 0 : if testOpts.enableValueBlocks {
627 0 : testOpts.Opts.Experimental.EnableValueBlocks = func() bool { return true }
628 : }
629 0 : testOpts.disableValueBlocksForIngestSSTables = rng.Intn(2) == 0
630 0 : testOpts.asyncApplyToDB = rng.Intn(2) != 0
631 0 : // 20% of time, enable shared storage.
632 0 : if rng.Intn(5) == 0 {
633 0 : testOpts.sharedStorageEnabled = true
634 0 : if testOpts.Opts.FormatMajorVersion < pebble.FormatMinForSharedObjects {
635 0 : testOpts.Opts.FormatMajorVersion = pebble.FormatMinForSharedObjects
636 0 : }
637 0 : inMemShared := remote.NewInMem()
638 0 : testOpts.Opts.Experimental.RemoteStorage = remote.MakeSimpleFactory(map[remote.Locator]remote.Storage{
639 0 : "": inMemShared,
640 0 : })
641 0 : testOpts.sharedStorageFS = inMemShared
642 0 : // If shared storage is enabled, pick between writing all files on shared
643 0 : // vs. lower levels only, 50% of the time.
644 0 : testOpts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedAll
645 0 : if rng.Intn(2) == 0 {
646 0 : testOpts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedLower
647 0 : }
648 : // If shared storage is enabled, enable secondary cache 50% of time.
649 0 : if rng.Intn(2) == 0 {
650 0 : testOpts.secondaryCacheEnabled = true
651 0 : // TODO(josh): Randomize various secondary cache settings.
652 0 : testOpts.Opts.Experimental.SecondaryCacheSizeBytes = 1024 * 1024 * 32 // 32 MBs
653 0 : }
654 : // 50% of the time, enable shared replication.
655 0 : testOpts.useSharedReplicate = rng.Intn(2) == 0
656 : }
657 0 : testOpts.seedEFOS = rng.Uint64()
658 0 : // TODO(bilal): Enable ingestSplit when known bugs with virtual sstables
659 0 : // are addressed.
660 0 : //
661 0 : // testOpts.ingestSplit = rng.Intn(2) == 0
662 0 : opts.Experimental.IngestSplit = func() bool { return testOpts.ingestSplit }
663 0 : testOpts.useExcise = rng.Intn(2) == 0
664 0 : if testOpts.useExcise {
665 0 : if testOpts.Opts.FormatMajorVersion < pebble.FormatVirtualSSTables {
666 0 : testOpts.Opts.FormatMajorVersion = pebble.FormatVirtualSSTables
667 0 : }
668 : }
669 0 : if testOpts.useExcise || testOpts.useSharedReplicate {
670 0 : testOpts.efosAlwaysCreatesIters = rng.Intn(2) == 0
671 0 : opts.TestingAlwaysCreateEFOSIterators(testOpts.efosAlwaysCreatesIters)
672 0 : }
673 0 : testOpts.Opts.EnsureDefaults()
674 0 : return testOpts
675 : }
676 :
677 0 : func setupInitialState(dataDir string, testOpts *TestOptions) error {
678 0 : // Copy (vfs.Default,<initialStatePath>/data) to (testOpts.opts.FS,<dataDir>).
679 0 : ok, err := vfs.Clone(
680 0 : vfs.Default,
681 0 : testOpts.Opts.FS,
682 0 : vfs.Default.PathJoin(testOpts.initialStatePath, "data"),
683 0 : dataDir,
684 0 : vfs.CloneSync,
685 0 : vfs.CloneSkip(func(filename string) bool {
686 0 : // Skip the archive of historical files, any checkpoints created by
687 0 : // operations and files staged for ingest in tmp.
688 0 : b := filepath.Base(filename)
689 0 : return b == "archive" || b == "checkpoints" || b == "tmp"
690 0 : }))
691 0 : if err != nil {
692 0 : return err
693 0 : } else if !ok {
694 0 : return os.ErrNotExist
695 0 : }
696 :
697 : // Tests with wal_dir set store their WALs in a `wal` directory. The source
698 : // database (initialStatePath) could've had wal_dir set, or the current test
699 : // options (testOpts) could have wal_dir set, or both.
700 0 : fs := testOpts.Opts.FS
701 0 : walDir := fs.PathJoin(dataDir, "wal")
702 0 : if err := fs.MkdirAll(walDir, os.ModePerm); err != nil {
703 0 : return err
704 0 : }
705 :
706 : // Copy <dataDir>/wal/*.log -> <dataDir>.
707 0 : src, dst := walDir, dataDir
708 0 : if testOpts.Opts.WALDir != "" {
709 0 : // Copy <dataDir>/*.log -> <dataDir>/wal.
710 0 : src, dst = dst, src
711 0 : }
712 0 : return moveLogs(fs, src, dst)
713 : }
714 :
715 0 : func moveLogs(fs vfs.FS, srcDir, dstDir string) error {
716 0 : ls, err := fs.List(srcDir)
717 0 : if err != nil {
718 0 : return err
719 0 : }
720 0 : for _, f := range ls {
721 0 : if filepath.Ext(f) != ".log" {
722 0 : continue
723 : }
724 0 : src := fs.PathJoin(srcDir, f)
725 0 : dst := fs.PathJoin(dstDir, f)
726 0 : if err := fs.Rename(src, dst); err != nil {
727 0 : return err
728 0 : }
729 : }
730 0 : return nil
731 : }
732 :
// blockPropertyCollectorConstructors is the set of block property collectors
// installed on the default options (unless disabled by
// TestOptions.disableBlockPropertyCollector).
var blockPropertyCollectorConstructors = []func() pebble.BlockPropertyCollector{
	sstable.NewTestKeysBlockPropertyCollector,
}
736 :
// testingFilterPolicy is used to allow bloom filter policies with non-default
// bits-per-key setting. It is necessary because the name of the production
// filter policy is fixed (see bloom.FilterPolicy.Name()); we need to output a
// custom policy name to the OPTIONS file that the test can then parse.
type testingFilterPolicy struct {
	bloom.FilterPolicy
}

// Compile-time assertion that testingFilterPolicy implements pebble.FilterPolicy.
var _ pebble.FilterPolicy = (*testingFilterPolicy)(nil)
746 :
747 1 : func newTestingFilterPolicy(bitsPerKey int) *testingFilterPolicy {
748 1 : return &testingFilterPolicy{
749 1 : FilterPolicy: bloom.FilterPolicy(bitsPerKey),
750 1 : }
751 1 : }
752 :
753 : const testingFilterPolicyFmt = "testing_bloom_filter/bits_per_key=%d"
754 :
755 : // Name implements the pebble.FilterPolicy interface.
756 1 : func (t *testingFilterPolicy) Name() string {
757 1 : if t.FilterPolicy == 10 {
758 0 : return "rocksdb.BuiltinBloomFilter"
759 0 : }
760 1 : return fmt.Sprintf(testingFilterPolicyFmt, t.FilterPolicy)
761 : }
762 :
763 1 : func filterPolicyFromName(name string) (pebble.FilterPolicy, error) {
764 1 : switch name {
765 1 : case "none":
766 1 : return nil, nil
767 1 : case "rocksdb.BuiltinBloomFilter":
768 1 : return bloom.FilterPolicy(10), nil
769 : }
770 1 : var bitsPerKey int
771 1 : if _, err := fmt.Sscanf(name, testingFilterPolicyFmt, &bitsPerKey); err != nil {
772 0 : return nil, errors.Errorf("Invalid filter policy name '%s'", name)
773 0 : }
774 1 : return newTestingFilterPolicy(bitsPerKey), nil
775 : }
|