// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package metamorphic

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/cockroachdb/errors"
	"github.com/cockroachdb/pebble"
	"github.com/cockroachdb/pebble/bloom"
	"github.com/cockroachdb/pebble/internal/cache"
	"github.com/cockroachdb/pebble/internal/testkeys"
	"github.com/cockroachdb/pebble/objstorage/remote"
	"github.com/cockroachdb/pebble/sstable"
	"github.com/cockroachdb/pebble/vfs"
	"golang.org/x/exp/rand"
)

const (
	minimumFormatMajorVersion = pebble.FormatMinSupported
	// The format major version to use in the default options configurations.
	// We default to the minimum supported format so we exercise the runtime
	// version ratcheting that an upgrading cluster would experience. The
	// randomized options may still use format major versions that are less
	// than defaultFormatMajorVersion but are at least
	// minimumFormatMajorVersion.
	defaultFormatMajorVersion = pebble.FormatMinSupported
	// newestFormatMajorVersionToTest is the most recent format major version
	// the metamorphic tests should use. This may be greater than
	// pebble.FormatNewest when some format major versions are marked as
	// experimental.
	newestFormatMajorVersionToTest = pebble.FormatNewest
)

func parseOptions(
	opts *TestOptions, data string, customOptionParsers map[string]func(string) (CustomOption, bool),
) error {
	hooks := &pebble.ParseHooks{
		NewCache:        pebble.NewCache,
		NewFilterPolicy: filterPolicyFromName,
		SkipUnknown: func(name, value string) bool {
			switch name {
			case "TestOptions":
				return true
			case "TestOptions.strictfs":
				opts.strictFS = true
				return true
			case "TestOptions.ingest_using_apply":
				opts.ingestUsingApply = true
				return true
			case "TestOptions.delete_sized":
				opts.deleteSized = true
				return true
			case "TestOptions.replace_single_delete":
				opts.replaceSingleDelete = true
				return true
			case "TestOptions.use_disk":
				opts.useDisk = true
				return true
			case "TestOptions.initial_state_desc":
				opts.initialStateDesc = value
				return true
			case "TestOptions.initial_state_path":
				opts.initialStatePath = value
				return true
			case "TestOptions.threads":
				v, err := strconv.Atoi(value)
				if err != nil {
					panic(err)
				}
				opts.threads = v
				return true
			case "TestOptions.disable_block_property_collector":
				v, err := strconv.ParseBool(value)
				if err != nil {
					panic(err)
				}
				opts.disableBlockPropertyCollector = v
				if v {
					opts.Opts.BlockPropertyCollectors = nil
				}
				return true
			case "TestOptions.enable_value_blocks":
				opts.enableValueBlocks = true
				opts.Opts.Experimental.EnableValueBlocks = func() bool { return true }
				return true
			case "TestOptions.async_apply_to_db":
				opts.asyncApplyToDB = true
				return true
			case "TestOptions.shared_storage_enabled":
				opts.sharedStorageEnabled = true
				sharedStorage := remote.NewInMem()
				opts.Opts.Experimental.RemoteStorage = remote.MakeSimpleFactory(map[remote.Locator]remote.Storage{
					"": sharedStorage,
				})
				opts.sharedStorageFS = sharedStorage
				if opts.Opts.Experimental.CreateOnShared == remote.CreateOnSharedNone {
					opts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedAll
				}
				return true
			case "TestOptions.secondary_cache_enabled":
				opts.secondaryCacheEnabled = true
				opts.Opts.Experimental.SecondaryCacheSizeBytes = 1024 * 1024 * 32 // 32 MB
				return true
			case "TestOptions.seed_efos":
				v, err := strconv.ParseUint(value, 10, 64)
				if err != nil {
					panic(err)
				}
				opts.seedEFOS = v
				return true
			case "TestOptions.ingest_split":
				opts.ingestSplit = true
				opts.Opts.Experimental.IngestSplit = func() bool {
					return true
				}
				return true
			case "TestOptions.use_shared_replicate":
				opts.useSharedReplicate = true
				return true
			case "TestOptions.use_excise":
				opts.useExcise = true
				return true
			case "TestOptions.efos_always_creates_iterators":
				opts.efosAlwaysCreatesIters = true
				opts.Opts.TestingAlwaysCreateEFOSIterators(true /* value */)
				return true
			default:
				if customOptionParsers == nil {
					return false
				}
				name = strings.TrimPrefix(name, "TestOptions.")
				if p, ok := customOptionParsers[name]; ok {
					if customOpt, ok := p(value); ok {
						opts.CustomOpts = append(opts.CustomOpts, customOpt)
						return true
					}
				}
				return false
			}
		},
	}
	err := opts.Opts.Parse(data, hooks)
	opts.Opts.EnsureDefaults()
	return err
}
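
// exampleParseTestOptions is an illustrative sketch (not wired into the
// metamorphic test runner) of how the [TestOptions] stanza piggybacks on
// pebble's OPTIONS parser: keys prefixed with "TestOptions." are unknown to
// pebble, so they are routed to the SkipUnknown hook above, which records
// them on the TestOptions struct.
func exampleParseTestOptions() (*TestOptions, error) {
	opts := defaultTestOptions()
	data := `
[Options]
  disable_wal=true

[TestOptions]
  use_disk=true
  threads=4
`
	if err := parseOptions(opts, data, nil /* no custom option parsers */); err != nil {
		return nil, err
	}
	// opts.useDisk is now true and opts.threads is 4.
	return opts, nil
}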

func optionsToString(opts *TestOptions) string {
	var buf bytes.Buffer
	if opts.strictFS {
		fmt.Fprint(&buf, " strictfs=true\n")
	}
	if opts.ingestUsingApply {
		fmt.Fprint(&buf, " ingest_using_apply=true\n")
	}
	if opts.deleteSized {
		fmt.Fprint(&buf, " delete_sized=true\n")
	}
	if opts.replaceSingleDelete {
		fmt.Fprint(&buf, " replace_single_delete=true\n")
	}
	if opts.useDisk {
		fmt.Fprint(&buf, " use_disk=true\n")
	}
	if opts.initialStatePath != "" {
		fmt.Fprintf(&buf, " initial_state_path=%s\n", opts.initialStatePath)
	}
	if opts.initialStateDesc != "" {
		fmt.Fprintf(&buf, " initial_state_desc=%s\n", opts.initialStateDesc)
	}
	if opts.threads != 0 {
		fmt.Fprintf(&buf, " threads=%d\n", opts.threads)
	}
	if opts.disableBlockPropertyCollector {
		fmt.Fprintf(&buf, " disable_block_property_collector=%t\n", opts.disableBlockPropertyCollector)
	}
	if opts.enableValueBlocks {
		fmt.Fprintf(&buf, " enable_value_blocks=%t\n", opts.enableValueBlocks)
	}
	if opts.asyncApplyToDB {
		fmt.Fprint(&buf, " async_apply_to_db=true\n")
	}
	if opts.sharedStorageEnabled {
		fmt.Fprint(&buf, " shared_storage_enabled=true\n")
	}
	if opts.secondaryCacheEnabled {
		fmt.Fprint(&buf, " secondary_cache_enabled=true\n")
	}
	if opts.seedEFOS != 0 {
		fmt.Fprintf(&buf, " seed_efos=%d\n", opts.seedEFOS)
	}
	if opts.ingestSplit {
		fmt.Fprintf(&buf, " ingest_split=%v\n", opts.ingestSplit)
	}
	if opts.useSharedReplicate {
		fmt.Fprintf(&buf, " use_shared_replicate=%v\n", opts.useSharedReplicate)
	}
	if opts.useExcise {
		fmt.Fprintf(&buf, " use_excise=%v\n", opts.useExcise)
	}
	if opts.efosAlwaysCreatesIters {
		fmt.Fprintf(&buf, " efos_always_creates_iterators=%v\n", opts.efosAlwaysCreatesIters)
	}
	for _, customOpt := range opts.CustomOpts {
		fmt.Fprintf(&buf, " %s=%s\n", customOpt.Name(), customOpt.Value())
	}

	s := opts.Opts.String()
	if buf.Len() == 0 {
		return s
	}
	return s + "\n[TestOptions]\n" + buf.String()
}
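
// exampleOptionsRoundTrip is an illustrative sketch (not called by the tests)
// of the contract between optionsToString and parseOptions: serializing a
// TestOptions appends a [TestOptions] stanza after pebble's own stanzas, and
// feeding that string back through parseOptions reproduces an equivalent
// configuration. Custom options would additionally need their parsers passed
// through instead of nil.
func exampleOptionsRoundTrip(src *TestOptions) (*TestOptions, error) {
	serialized := optionsToString(src)
	parsed := defaultTestOptions()
	if err := parseOptions(parsed, serialized, nil /* no custom option parsers */); err != nil {
		return nil, err
	}
	return parsed, nil
}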

func defaultTestOptions() *TestOptions {
	return &TestOptions{
		Opts:    defaultOptions(),
		threads: 16,
	}
}

func defaultOptions() *pebble.Options {
	opts := &pebble.Options{
		Comparer:           testkeys.Comparer,
		FS:                 vfs.NewMem(),
		FormatMajorVersion: defaultFormatMajorVersion,
		Levels: []pebble.LevelOptions{{
			FilterPolicy: bloom.FilterPolicy(10),
		}},
		BlockPropertyCollectors: blockPropertyCollectorConstructors,
	}
	// TODO(sumeer): add IneffectualSingleDeleteCallback that panics by
	// supporting a test option that does not generate ineffectual single
	// deletes.
	opts.Experimental.SingleDeleteInvariantViolationCallback = func(
		userKey []byte) {
		panic(errors.AssertionFailedf("single del invariant violations on key %q", userKey))
	}
	return opts
}

// TestOptions describes the options configuring an individual run of the
// metamorphic tests.
type TestOptions struct {
	// Opts holds the *pebble.Options for the test.
	Opts *pebble.Options
	// CustomOpts holds custom test options that are defined outside of this
	// package.
	CustomOpts []CustomOption
	useDisk    bool
	strictFS   bool
	threads    int
	// Use Batch.Apply rather than DB.Ingest.
	ingestUsingApply bool
	// Use Batch.DeleteSized rather than Batch.Delete.
	deleteSized bool
	// Replace a SINGLEDEL with a DELETE.
	replaceSingleDelete bool
	// The path on the local filesystem where the initial state of the database
	// exists. Empty if the test run begins from an empty database state.
	initialStatePath string
	// A human-readable string describing the initial state of the database.
	// Empty if the test run begins from an empty database state.
	initialStateDesc string
	// Disable the block property collector, which may be used by block
	// property filters.
	disableBlockPropertyCollector bool
	// Enable the use of value blocks.
	enableValueBlocks bool
	// Use DB.ApplyNoSyncWait for applies that want to sync the WAL.
	asyncApplyToDB bool
	// Enable the use of shared storage.
	sharedStorageEnabled bool
	// sharedStorageFS stores the remote.Storage that is being used with shared
	// storage.
	sharedStorageFS remote.Storage
	// Enables the use of shared replication in TestOptions.
	useSharedReplicate bool
	// Enable the secondary cache. Only effective if sharedStorageEnabled is
	// also true.
	secondaryCacheEnabled bool
	// If nonzero, enables the use of EventuallyFileOnlySnapshots for
	// newSnapshotOps that are keyspan-bounded. The set of newSnapshotOps that
	// are actually created as EventuallyFileOnlySnapshots is deterministically
	// derived from the seed and the operation index.
	seedEFOS uint64
	// Enables ingest splits. Saved here for serialization as Options does not
	// serialize this.
	ingestSplit bool
	// Enables operations that do excises. Note that a false value for this
	// does not guarantee the lack of excises, as useSharedReplicate can also
	// cause excises. However, !useExcise && !useSharedReplicate can be used to
	// guarantee the absence of excises.
	useExcise bool
	// Enables EFOS to always create iterators, even if a conflicting excise
	// happens. Used to guarantee EFOS determinism when conflicting excises are
	// in play. If false, EFOS determinism is maintained by having the DB do a
	// flush after every new EFOS.
	efosAlwaysCreatesIters bool
}

// CustomOption defines a custom option that configures the behavior of an
// individual test run. Like all test options, custom options are serialized to
// the OPTIONS file even if they're not options ordinarily understood by Pebble.
type CustomOption interface {
	// Name returns the name of the custom option. This is the key under which
	// the option appears in the OPTIONS file, within the [TestOptions] stanza.
	Name() string
	// Value returns the value of the custom option, serialized as it should
	// appear within the OPTIONS file.
	Value() string
	// Close is run after the test database has been closed at the end of the
	// test, as well as during restart operations within the test sequence.
	// It's passed a copy of the *pebble.Options. If the custom option holds on
	// to any external resources, Close should release them.
	Close(*pebble.Options) error
	// Open is run before the test runs and during a restart operation after
	// the test database has been closed and Close has been called. It's passed
	// a copy of the *pebble.Options. If the custom option must acquire any
	// resources before the test continues, Open should reacquire them.
	Open(*pebble.Options) error

	// TODO(jackson): provide additional hooks for custom options changing the
	// behavior of a run.
}
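
// exampleNoopOption is an illustrative sketch of a minimal CustomOption. The
// type is hypothetical; real custom options are defined outside this package
// and surfaced through the customOptionParsers argument of parseOptions, keyed
// by the option name that appears in the [TestOptions] stanza.
type exampleNoopOption struct {
	value string
}

func (o exampleNoopOption) Name() string  { return "example_noop" }
func (o exampleNoopOption) Value() string { return o.value }

// Open and Close are no-ops because this option holds no external resources.
func (o exampleNoopOption) Open(*pebble.Options) error  { return nil }
func (o exampleNoopOption) Close(*pebble.Options) error { return nil }

var _ CustomOption = exampleNoopOption{}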

func standardOptions() []*TestOptions {
	// The index labels are not strictly necessary, but they make it easier to
	// find which options correspond to a failure.
	stdOpts := []string{
		0: "", // default options
		1: `
[Options]
  cache_size=1
`,
		2: `
[Options]
  disable_wal=true
`,
		3: `
[Options]
  l0_compaction_threshold=1
`,
		4: `
[Options]
  l0_compaction_threshold=1
  l0_stop_writes_threshold=1
`,
		5: `
[Options]
  lbase_max_bytes=1
`,
		6: `
[Options]
  max_manifest_file_size=1
`,
		7: `
[Options]
  max_open_files=1
`,
		8: `
[Options]
  mem_table_size=2000
`,
		9: `
[Options]
  mem_table_stop_writes_threshold=2
`,
		10: `
[Options]
  wal_dir=data/wal
`,
		11: `
[Level "0"]
  block_restart_interval=1
`,
		12: `
[Level "0"]
  block_size=1
`,
		13: `
[Level "0"]
  compression=NoCompression
`,
		14: `
[Level "0"]
  index_block_size=1
`,
		15: `
[Level "0"]
  target_file_size=1
`,
		16: `
[Level "0"]
  filter_policy=none
`,
		// 1GB
		17: `
[Options]
  bytes_per_sync=1073741824
[TestOptions]
  strictfs=true
`,
		18: `
[Options]
  max_concurrent_compactions=2
`,
		19: `
[TestOptions]
  ingest_using_apply=true
`,
		20: `
[TestOptions]
  replace_single_delete=true
`,
		21: `
[TestOptions]
  use_disk=true
`,
		22: `
[Options]
  max_writer_concurrency=2
  force_writer_parallelism=true
`,
		23: `
[TestOptions]
  disable_block_property_collector=true
`,
		24: `
[TestOptions]
  threads=1
`,
		25: `
[TestOptions]
  enable_value_blocks=true
`,
		26: fmt.Sprintf(`
[Options]
  format_major_version=%s
`, newestFormatMajorVersionToTest),
		27: `
[TestOptions]
  shared_storage_enabled=true
  secondary_cache_enabled=true
`,
	}

	opts := make([]*TestOptions, len(stdOpts))
	for i := range opts {
		opts[i] = defaultTestOptions()
		// NB: The standard options by definition can never include custom
		// options, so no need to propagate custom option parsers.
		if err := parseOptions(opts[i], stdOpts[i], nil /* custom option parsers */); err != nil {
			panic(err)
		}
	}
	return opts
}

func randomOptions(
	rng *rand.Rand, customOptionParsers map[string]func(string) (CustomOption, bool),
) *TestOptions {
	testOpts := defaultTestOptions()
	opts := testOpts.Opts

	// There are some private options which we don't want users to fiddle with.
	// There's no way to set them through the public interface; the only way is
	// through Parse.
	{
		var privateOpts bytes.Buffer
		fmt.Fprintln(&privateOpts, `[Options]`)
		if rng.Intn(3) == 0 /* 33% */ {
			fmt.Fprintln(&privateOpts, ` disable_delete_only_compactions=true`)
		}
		if rng.Intn(3) == 0 /* 33% */ {
			fmt.Fprintln(&privateOpts, ` disable_elision_only_compactions=true`)
		}
		if rng.Intn(5) == 0 /* 20% */ {
			fmt.Fprintln(&privateOpts, ` disable_lazy_combined_iteration=true`)
		}
		// Only bother parsing if at least one private option was appended
		// after the "[Options]" header line.
		if privateOptsStr := privateOpts.String(); privateOptsStr != "[Options]\n" {
			parseOptions(testOpts, privateOptsStr, customOptionParsers)
		}
	}

	opts.BytesPerSync = 1 << uint(rng.Intn(28))     // 1B - 256MB
	opts.Cache = cache.New(1 << uint(rng.Intn(30))) // 1B - 1GB
	opts.DisableWAL = rng.Intn(2) == 0
	opts.FlushDelayDeleteRange = time.Millisecond * time.Duration(5*rng.Intn(245)) // 0ms - 1220ms, in 5ms increments
	opts.FlushDelayRangeKey = time.Millisecond * time.Duration(5*rng.Intn(245))    // 0ms - 1220ms, in 5ms increments
	opts.FlushSplitBytes = 1 << rng.Intn(20)                                       // 1B - 1MB
	opts.FormatMajorVersion = minimumFormatMajorVersion
	n := int(newestFormatMajorVersionToTest - opts.FormatMajorVersion)
	opts.FormatMajorVersion += pebble.FormatMajorVersion(rng.Intn(n + 1))
	opts.Experimental.L0CompactionConcurrency = 1 + rng.Intn(4) // 1-4
	opts.Experimental.LevelMultiplier = 5 << rng.Intn(7)        // 5 - 320
	opts.TargetByteDeletionRate = 1 << uint(20+rng.Intn(10))    // 1MB - 1GB
	opts.Experimental.ValidateOnIngest = rng.Intn(2) != 0
	opts.L0CompactionThreshold = 1 + rng.Intn(100)     // 1 - 100
	opts.L0CompactionFileThreshold = 1 << rng.Intn(11) // 1 - 1024
	opts.L0StopWritesThreshold = 1 + rng.Intn(100)     // 1 - 100
	if opts.L0StopWritesThreshold < opts.L0CompactionThreshold {
		opts.L0StopWritesThreshold = opts.L0CompactionThreshold
	}
	opts.LBaseMaxBytes = 1 << uint(rng.Intn(30)) // 1B - 1GB
	maxConcurrentCompactions := rng.Intn(3) + 1  // 1-3
	opts.MaxConcurrentCompactions = func() int {
		return maxConcurrentCompactions
	}
	opts.MaxManifestFileSize = 1 << uint(rng.Intn(30)) // 1B - 1GB
	opts.MemTableSize = 2 << (10 + uint(rng.Intn(16))) // 2KB - 64MB
	opts.MemTableStopWritesThreshold = 2 + rng.Intn(5) // 2 - 6
	if rng.Intn(2) == 0 {
		opts.WALDir = "data/wal"
	}
	if rng.Intn(4) == 0 {
		// Enable Writer parallelism for 25% of the random options. Setting
		// MaxWriterConcurrency to any value greater than or equal to 1 has the
		// same effect currently.
		opts.Experimental.MaxWriterConcurrency = 2
		opts.Experimental.ForceWriterParallelism = true
	}
	if rng.Intn(2) == 0 {
		opts.Experimental.DisableIngestAsFlushable = func() bool { return true }
	}

	// We either use no multilevel compactions, multilevel compactions with the
	// default (zero) additional propensity, or multilevel compactions with an
	// additional propensity to encourage more multilevel compactions than we
	// otherwise would.
	switch rng.Intn(3) {
	case 0:
		opts.Experimental.MultiLevelCompactionHeuristic = pebble.NoMultiLevel{}
	case 1:
		opts.Experimental.MultiLevelCompactionHeuristic = pebble.WriteAmpHeuristic{}
	default:
		opts.Experimental.MultiLevelCompactionHeuristic = pebble.WriteAmpHeuristic{
			AddPropensity: rng.Float64() * float64(rng.Intn(3)), // [0, 2.0)
			AllowL0:       rng.Intn(4) == 1,                     // 25% of the time
		}
	}

	var lopts pebble.LevelOptions
	lopts.BlockRestartInterval = 1 + rng.Intn(64)  // 1 - 64
	lopts.BlockSize = 1 << uint(rng.Intn(24))      // 1 - 16MB
	lopts.BlockSizeThreshold = 50 + rng.Intn(50)   // 50 - 100
	lopts.IndexBlockSize = 1 << uint(rng.Intn(24)) // 1 - 16MB
	lopts.TargetFileSize = 1 << uint(rng.Intn(28)) // 1 - 256MB

	// We either use no bloom filter, the default filter, or a filter with
	// randomized bits-per-key setting. We zero out the Filters map. It'll get
	// repopulated on EnsureDefaults accordingly.
	opts.Filters = nil
	switch rng.Intn(3) {
	case 0:
		lopts.FilterPolicy = nil
	case 1:
		lopts.FilterPolicy = bloom.FilterPolicy(10)
	default:
		lopts.FilterPolicy = newTestingFilterPolicy(1 << rng.Intn(5))
	}

	// We use either no compression, snappy compression or zstd compression.
	switch rng.Intn(3) {
	case 0:
		lopts.Compression = pebble.NoCompression
	case 1:
		lopts.Compression = pebble.ZstdCompression
	default:
		lopts.Compression = pebble.SnappyCompression
	}
	opts.Levels = []pebble.LevelOptions{lopts}

	// Explicitly disable disk-backed FS's for the random configurations. The
	// single standard test configuration that uses a disk-backed FS is
	// sufficient.
	testOpts.useDisk = false
	testOpts.strictFS = rng.Intn(2) != 0 // Only relevant for MemFS.
	testOpts.threads = rng.Intn(runtime.GOMAXPROCS(0)) + 1
	if testOpts.strictFS {
		opts.DisableWAL = false
	}
	testOpts.ingestUsingApply = rng.Intn(2) != 0
	testOpts.deleteSized = rng.Intn(2) != 0
	testOpts.replaceSingleDelete = rng.Intn(2) != 0
	testOpts.disableBlockPropertyCollector = rng.Intn(2) == 1
	if testOpts.disableBlockPropertyCollector {
		testOpts.Opts.BlockPropertyCollectors = nil
	}
	testOpts.enableValueBlocks = rng.Intn(2) != 0
	if testOpts.enableValueBlocks {
		testOpts.Opts.Experimental.EnableValueBlocks = func() bool { return true }
	}
	testOpts.asyncApplyToDB = rng.Intn(2) != 0
	// 20% of the time, enable shared storage.
	if rng.Intn(5) == 0 {
		testOpts.sharedStorageEnabled = true
		inMemShared := remote.NewInMem()
		testOpts.Opts.Experimental.RemoteStorage = remote.MakeSimpleFactory(map[remote.Locator]remote.Storage{
			"": inMemShared,
		})
		testOpts.sharedStorageFS = inMemShared
		// If shared storage is enabled, pick between writing all files on
		// shared vs. lower levels only, 50% of the time.
		testOpts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedAll
		if rng.Intn(2) == 0 {
			testOpts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedLower
		}
		// If shared storage is enabled, enable the secondary cache 50% of the
		// time.
		if rng.Intn(2) == 0 {
			testOpts.secondaryCacheEnabled = true
			// TODO(josh): Randomize various secondary cache settings.
			testOpts.Opts.Experimental.SecondaryCacheSizeBytes = 1024 * 1024 * 32 // 32 MB
		}
		// 50% of the time, enable shared replication.
		testOpts.useSharedReplicate = rng.Intn(2) == 0
	}
	testOpts.seedEFOS = rng.Uint64()
	// TODO(bilal): Enable ingestSplit when known bugs with virtual sstables
	// are addressed.
	//
	// testOpts.ingestSplit = rng.Intn(2) == 0
	opts.Experimental.IngestSplit = func() bool { return testOpts.ingestSplit }
	testOpts.useExcise = rng.Intn(2) == 0
	if testOpts.useExcise || testOpts.useSharedReplicate {
		testOpts.efosAlwaysCreatesIters = rng.Intn(2) == 0
		opts.TestingAlwaysCreateEFOSIterators(testOpts.efosAlwaysCreatesIters)
		if testOpts.Opts.FormatMajorVersion < pebble.FormatVirtualSSTables {
			testOpts.Opts.FormatMajorVersion = pebble.FormatVirtualSSTables
		}
	}
	testOpts.Opts.EnsureDefaults()
	return testOpts
}
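
// exampleRandomOptions is an illustrative sketch (not wired into the
// metamorphic test runner) of regenerating a randomized configuration
// deterministically: seeding the rng with the same value yields the same
// TestOptions, whose serialized form can then be inspected or replayed.
func exampleRandomOptions(seed uint64) string {
	rng := rand.New(rand.NewSource(seed))
	testOpts := randomOptions(rng, nil /* no custom option parsers */)
	return optionsToString(testOpts)
}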

func setupInitialState(dataDir string, testOpts *TestOptions) error {
	// Copy (vfs.Default,<initialStatePath>/data) to (testOpts.Opts.FS,<dataDir>).
	ok, err := vfs.Clone(
		vfs.Default,
		testOpts.Opts.FS,
		vfs.Default.PathJoin(testOpts.initialStatePath, "data"),
		dataDir,
		vfs.CloneSync,
		vfs.CloneSkip(func(filename string) bool {
			// Skip the archive of historical files, any checkpoints created by
			// operations and files staged for ingest in tmp.
			b := filepath.Base(filename)
			return b == "archive" || b == "checkpoints" || b == "tmp"
		}))
	if err != nil {
		return err
	} else if !ok {
		return os.ErrNotExist
	}

	// Tests with wal_dir set store their WALs in a `wal` directory. The source
	// database (initialStatePath) could've had wal_dir set, or the current test
	// options (testOpts) could have wal_dir set, or both.
	fs := testOpts.Opts.FS
	walDir := fs.PathJoin(dataDir, "wal")
	if err := fs.MkdirAll(walDir, os.ModePerm); err != nil {
		return err
	}

	// Move <dataDir>/wal/*.log -> <dataDir>.
	src, dst := walDir, dataDir
	if testOpts.Opts.WALDir != "" {
		// Move <dataDir>/*.log -> <dataDir>/wal.
		src, dst = dst, src
	}
	return moveLogs(fs, src, dst)
}

func moveLogs(fs vfs.FS, srcDir, dstDir string) error {
	ls, err := fs.List(srcDir)
	if err != nil {
		return err
	}
	for _, f := range ls {
		if filepath.Ext(f) != ".log" {
			continue
		}
		src := fs.PathJoin(srcDir, f)
		dst := fs.PathJoin(dstDir, f)
		if err := fs.Rename(src, dst); err != nil {
			return err
		}
	}
	return nil
}

var blockPropertyCollectorConstructors = []func() pebble.BlockPropertyCollector{
	sstable.NewTestKeysBlockPropertyCollector,
}

// testingFilterPolicy is used to allow bloom filter policies with non-default
// bits-per-key setting. It is necessary because the name of the production
// filter policy is fixed (see bloom.FilterPolicy.Name()); we need to output a
// custom policy name to the OPTIONS file that the test can then parse.
type testingFilterPolicy struct {
	bloom.FilterPolicy
}

var _ pebble.FilterPolicy = (*testingFilterPolicy)(nil)

func newTestingFilterPolicy(bitsPerKey int) *testingFilterPolicy {
	return &testingFilterPolicy{
		FilterPolicy: bloom.FilterPolicy(bitsPerKey),
	}
}

const testingFilterPolicyFmt = "testing_bloom_filter/bits_per_key=%d"

// Name implements the pebble.FilterPolicy interface.
func (t *testingFilterPolicy) Name() string {
	if t.FilterPolicy == 10 {
		return "rocksdb.BuiltinBloomFilter"
	}
	return fmt.Sprintf(testingFilterPolicyFmt, t.FilterPolicy)
}

func filterPolicyFromName(name string) (pebble.FilterPolicy, error) {
	switch name {
	case "none":
		return nil, nil
	case "rocksdb.BuiltinBloomFilter":
		return bloom.FilterPolicy(10), nil
	}
	var bitsPerKey int
	if _, err := fmt.Sscanf(name, testingFilterPolicyFmt, &bitsPerKey); err != nil {
		return nil, errors.Errorf("Invalid filter policy name '%s'", name)
	}
	return newTestingFilterPolicy(bitsPerKey), nil
}
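
// exampleFilterPolicyRoundTrip is an illustrative sketch (the function name is
// hypothetical) of why testingFilterPolicy exists: the custom name encodes the
// bits-per-key setting, so a policy written to the OPTIONS file can be
// reconstructed by filterPolicyFromName when the file is parsed back.
func exampleFilterPolicyRoundTrip() (pebble.FilterPolicy, error) {
	fp := newTestingFilterPolicy(4)
	// fp.Name() is "testing_bloom_filter/bits_per_key=4"; parsing that name
	// yields an equivalent 4-bits-per-key policy.
	return filterPolicyFromName(fp.Name())
}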