Line data Source code
1 : // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package metamorphic
6 :
7 : import (
8 : "bytes"
9 : "fmt"
10 : "math"
11 : "os"
12 : "path/filepath"
13 : "runtime"
14 : "strconv"
15 : "strings"
16 : "time"
17 :
18 : "github.com/cockroachdb/errors"
19 : "github.com/cockroachdb/pebble"
20 : "github.com/cockroachdb/pebble/bloom"
21 : "github.com/cockroachdb/pebble/internal/base"
22 : "github.com/cockroachdb/pebble/internal/cache"
23 : "github.com/cockroachdb/pebble/internal/testkeys"
24 : "github.com/cockroachdb/pebble/objstorage/remote"
25 : "github.com/cockroachdb/pebble/sstable"
26 : "github.com/cockroachdb/pebble/vfs"
27 : "github.com/cockroachdb/pebble/wal"
28 : "golang.org/x/exp/rand"
29 : )
30 :
31 : const (
32 : minimumFormatMajorVersion = pebble.FormatMinSupported
33 : // The format major version to use in the default options configurations. We
34 : // default to the minimum supported format so we exercise the runtime version
35 : // ratcheting that an upgrading cluster would experience. The randomized
36 : // options may still use format major versions that are less than
37 : // defaultFormatMajorVersion but are at least minimumFormatMajorVersion.
38 : defaultFormatMajorVersion = pebble.FormatMinSupported
39 : // newestFormatMajorVersionToTest is the most recent format major version
40 : // the metamorphic tests should use. This may be greater than
41 : // pebble.FormatNewest when some format major versions are marked as
42 : // experimental.
43 : newestFormatMajorVersionToTest = pebble.FormatNewest
44 : )
45 :
46 : func parseOptions(
47 : opts *TestOptions, data string, customOptionParsers map[string]func(string) (CustomOption, bool),
48 1 : ) error {
49 1 : hooks := &pebble.ParseHooks{
50 1 : NewCache: pebble.NewCache,
51 1 : NewFilterPolicy: filterPolicyFromName,
52 1 : SkipUnknown: func(name, value string) bool {
53 1 : switch name {
54 0 : case "TestOptions":
55 0 : return true
56 1 : case "TestOptions.strictfs":
57 1 : opts.strictFS = true
58 1 : opts.Opts.FS = vfs.NewStrictMem()
59 1 : return true
60 1 : case "TestOptions.ingest_using_apply":
61 1 : opts.ingestUsingApply = true
62 1 : return true
63 1 : case "TestOptions.delete_sized":
64 1 : opts.deleteSized = true
65 1 : return true
66 1 : case "TestOptions.replace_single_delete":
67 1 : opts.replaceSingleDelete = true
68 1 : return true
69 1 : case "TestOptions.use_disk":
70 1 : opts.useDisk = true
71 1 : opts.Opts.FS = vfs.Default
72 1 : return true
73 0 : case "TestOptions.initial_state_desc":
74 0 : opts.initialStateDesc = value
75 0 : return true
76 0 : case "TestOptions.initial_state_path":
77 0 : opts.initialStatePath = value
78 0 : return true
79 1 : case "TestOptions.threads":
80 1 : v, err := strconv.Atoi(value)
81 1 : if err != nil {
82 0 : panic(err)
83 : }
84 1 : opts.Threads = v
85 1 : return true
86 1 : case "TestOptions.disable_block_property_collector":
87 1 : v, err := strconv.ParseBool(value)
88 1 : if err != nil {
89 0 : panic(err)
90 : }
91 1 : opts.disableBlockPropertyCollector = v
92 1 : if v {
93 1 : opts.Opts.BlockPropertyCollectors = nil
94 1 : }
95 1 : return true
96 1 : case "TestOptions.enable_value_blocks":
97 1 : opts.enableValueBlocks = true
98 1 : opts.Opts.Experimental.EnableValueBlocks = func() bool { return true }
99 1 : return true
100 1 : case "TestOptions.disable_value_blocks_for_ingest_sstables":
101 1 : opts.disableValueBlocksForIngestSSTables = true
102 1 : return true
103 1 : case "TestOptions.async_apply_to_db":
104 1 : opts.asyncApplyToDB = true
105 1 : return true
106 1 : case "TestOptions.shared_storage_enabled":
107 1 : opts.sharedStorageEnabled = true
108 1 : opts.sharedStorageFS = remote.NewInMem()
109 1 : if opts.Opts.Experimental.CreateOnShared == remote.CreateOnSharedNone {
110 0 : opts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedAll
111 0 : }
112 1 : return true
113 1 : case "TestOptions.external_storage_enabled":
114 1 : opts.externalStorageEnabled = true
115 1 : opts.externalStorageFS = remote.NewInMem()
116 1 : return true
117 1 : case "TestOptions.secondary_cache_enabled":
118 1 : opts.secondaryCacheEnabled = true
119 1 : opts.Opts.Experimental.SecondaryCacheSizeBytes = 1024 * 1024 * 32 // 32 MBs
120 1 : return true
121 1 : case "TestOptions.seed_efos":
122 1 : v, err := strconv.ParseUint(value, 10, 64)
123 1 : if err != nil {
124 0 : panic(err)
125 : }
126 1 : opts.seedEFOS = v
127 1 : return true
128 1 : case "TestOptions.io_latency_mean":
129 1 : v, err := time.ParseDuration(value)
130 1 : if err != nil {
131 0 : panic(err)
132 : }
133 1 : opts.ioLatencyMean = v
134 1 : return true
135 1 : case "TestOptions.io_latency_probability":
136 1 : v, err := strconv.ParseFloat(value, 64)
137 1 : if err != nil {
138 0 : panic(err)
139 : }
140 1 : opts.ioLatencyProbability = v
141 1 : return true
142 1 : case "TestOptions.io_latency_seed":
143 1 : v, err := strconv.ParseInt(value, 10, 64)
144 1 : if err != nil {
145 0 : panic(err)
146 : }
147 1 : opts.ioLatencySeed = v
148 1 : return true
149 0 : case "TestOptions.ingest_split":
150 0 : opts.ingestSplit = true
151 0 : opts.Opts.Experimental.IngestSplit = func() bool {
152 0 : return true
153 0 : }
154 0 : return true
155 1 : case "TestOptions.use_shared_replicate":
156 1 : opts.useSharedReplicate = true
157 1 : return true
158 1 : case "TestOptions.use_excise":
159 1 : opts.useExcise = true
160 1 : return true
161 0 : default:
162 0 : if customOptionParsers == nil {
163 0 : return false
164 0 : }
165 0 : name = strings.TrimPrefix(name, "TestOptions.")
166 0 : if p, ok := customOptionParsers[name]; ok {
167 0 : if customOpt, ok := p(value); ok {
168 0 : opts.CustomOpts = append(opts.CustomOpts, customOpt)
169 0 : return true
170 0 : }
171 : }
172 0 : return false
173 : }
174 : },
175 : }
176 1 : err := opts.Opts.Parse(data, hooks)
177 1 : // Ensure that the WAL failover FS agrees with the primary FS. They're
178 1 : // separate options, but in the metamorphic tests we keep them in sync.
179 1 : if opts.Opts.WALFailover != nil {
180 1 : opts.Opts.WALFailover.Secondary.FS = opts.Opts.FS
181 1 : }
182 1 : opts.InitRemoteStorageFactory()
183 1 : opts.Opts.EnsureDefaults()
184 1 : return err
185 : }
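
// Editorial sketch, not part of the original file: an illustration of how a
// serialized OPTIONS blob with a [TestOptions] stanza might be fed through
// parseOptions. The function name and the specific option values below are
// hypothetical.
func parseTestOptionsExample() (*TestOptions, error) {
	const data = `
[Options]
  disable_wal=true
[TestOptions]
  ingest_using_apply=true
  threads=4
`
	opts := defaultTestOptions()
	// No custom option parsers are registered in this sketch.
	if err := parseOptions(opts, data, nil); err != nil {
		return nil, err
	}
	return opts, nil
}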
186 :
187 0 : func optionsToString(opts *TestOptions) string {
188 0 : var buf bytes.Buffer
189 0 : if opts.strictFS {
190 0 : fmt.Fprint(&buf, " strictfs=true\n")
191 0 : }
192 0 : if opts.ingestUsingApply {
193 0 : fmt.Fprint(&buf, " ingest_using_apply=true\n")
194 0 : }
195 0 : if opts.deleteSized {
196 0 : fmt.Fprint(&buf, " delete_sized=true\n")
197 0 : }
198 0 : if opts.replaceSingleDelete {
199 0 : fmt.Fprint(&buf, " replace_single_delete=true\n")
200 0 : }
201 0 : if opts.useDisk {
202 0 : fmt.Fprint(&buf, " use_disk=true\n")
203 0 : }
204 0 : if opts.initialStatePath != "" {
205 0 : fmt.Fprintf(&buf, " initial_state_path=%s\n", opts.initialStatePath)
206 0 : }
207 0 : if opts.initialStateDesc != "" {
208 0 : fmt.Fprintf(&buf, " initial_state_desc=%s\n", opts.initialStateDesc)
209 0 : }
210 0 : if opts.Threads != 0 {
211 0 : fmt.Fprintf(&buf, " threads=%d\n", opts.Threads)
212 0 : }
213 0 : if opts.disableBlockPropertyCollector {
214 0 : fmt.Fprintf(&buf, " disable_block_property_collector=%t\n", opts.disableBlockPropertyCollector)
215 0 : }
216 0 : if opts.enableValueBlocks {
217 0 : fmt.Fprintf(&buf, " enable_value_blocks=%t\n", opts.enableValueBlocks)
218 0 : }
219 0 : if opts.disableValueBlocksForIngestSSTables {
220 0 : fmt.Fprintf(&buf, " disable_value_blocks_for_ingest_sstables=%t\n", opts.disableValueBlocksForIngestSSTables)
221 0 : }
222 0 : if opts.asyncApplyToDB {
223 0 : fmt.Fprint(&buf, " async_apply_to_db=true\n")
224 0 : }
225 0 : if opts.sharedStorageEnabled {
226 0 : fmt.Fprint(&buf, " shared_storage_enabled=true\n")
227 0 : }
228 0 : if opts.externalStorageEnabled {
229 0 : fmt.Fprint(&buf, " external_storage_enabled=true\n")
230 0 : }
231 0 : if opts.secondaryCacheEnabled {
232 0 : fmt.Fprint(&buf, " secondary_cache_enabled=true\n")
233 0 : }
234 0 : if opts.seedEFOS != 0 {
235 0 : fmt.Fprintf(&buf, " seed_efos=%d\n", opts.seedEFOS)
236 0 : }
237 0 : if opts.ingestSplit {
238 0 : fmt.Fprintf(&buf, " ingest_split=%v\n", opts.ingestSplit)
239 0 : }
240 0 : if opts.ioLatencyProbability > 0.0 {
241 0 : fmt.Fprintf(&buf, " io_latency_mean=%s\n", opts.ioLatencyMean)
242 0 : fmt.Fprintf(&buf, " io_latency_probability=%f\n", opts.ioLatencyProbability)
243 0 : fmt.Fprintf(&buf, " io_latency_seed=%d\n", opts.ioLatencySeed)
244 0 : }
245 0 : if opts.useSharedReplicate {
246 0 : fmt.Fprintf(&buf, " use_shared_replicate=%v\n", opts.useSharedReplicate)
247 0 : }
248 0 : if opts.useExcise {
249 0 : fmt.Fprintf(&buf, " use_excise=%v\n", opts.useExcise)
250 0 : }
251 0 : for _, customOpt := range opts.CustomOpts {
252 0 : fmt.Fprintf(&buf, " %s=%s\n", customOpt.Name(), customOpt.Value())
253 0 : }
254 :
255 0 : s := opts.Opts.String()
256 0 : if buf.Len() == 0 {
257 0 : return s
258 0 : }
259 0 : return s + "\n[TestOptions]\n" + buf.String()
260 : }
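
// Editorial sketch, not part of the original file: optionsToString and
// parseOptions are intended to round-trip, which is how a run's configuration
// is written to an OPTIONS file and later reloaded. The function name is
// hypothetical; TestOptions carrying CustomOpts would additionally need the
// corresponding parsers passed through the customOptionParsers argument.
func optionsRoundTripExample(src *TestOptions) (*TestOptions, error) {
	serialized := optionsToString(src)
	dst := defaultTestOptions()
	if err := parseOptions(dst, serialized, nil /* custom option parsers */); err != nil {
		return nil, err
	}
	return dst, nil
}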
261 :
262 1 : func defaultTestOptions() *TestOptions {
263 1 : return &TestOptions{
264 1 : Opts: defaultOptions(),
265 1 : Threads: 16,
266 1 : RetryPolicy: NeverRetry,
267 1 : }
268 1 : }
269 :
270 1 : func defaultOptions() *pebble.Options {
271 1 : opts := &pebble.Options{
272 1 : // Use an archive cleaner to ease post-mortem debugging.
273 1 : Cleaner: base.ArchiveCleaner{},
274 1 : // Always use our custom comparer which provides a Split method,
275 1 : // splitting keys at the trailing '@'.
276 1 : Comparer: testkeys.Comparer,
277 1 : DebugCheck: pebble.DebugCheckLevels,
278 1 : FS: vfs.NewMem(),
279 1 : FormatMajorVersion: defaultFormatMajorVersion,
280 1 : Levels: []pebble.LevelOptions{{
281 1 : FilterPolicy: bloom.FilterPolicy(10),
282 1 : }},
283 1 : BlockPropertyCollectors: blockPropertyCollectorConstructors,
284 1 : }
285 1 : return opts
286 1 : }
287 :
288 : // TestOptions describes the options configuring an individual run of the
289 : // metamorphic tests.
290 : type TestOptions struct {
291 : // Opts holds the *pebble.Options for the test.
292 : Opts *pebble.Options
293 : // Threads configures the parallelism of the test. Each thread will run in
294 : // an independent goroutine and be responsible for executing operations
295 : // against an independent set of objects. The outcome of any individual
296 : // operation will still be deterministic, with the metamorphic test
297 : // inserting synchronization where necessary.
298 : Threads int
299 : // RetryPolicy configures which errors should be retried.
300 : RetryPolicy RetryPolicy
301 : // CustomOpts holds custom test options that are defined outside of this
302 : // package.
303 : CustomOpts []CustomOption
304 :
305 : // internal
306 :
307 : useDisk bool
308 : strictFS bool
309 : // Use Batch.Apply rather than DB.Ingest.
310 : ingestUsingApply bool
311 : // Use Batch.DeleteSized rather than Batch.Delete.
312 : deleteSized bool
313 : // Replace a SINGLEDEL with a DELETE.
314 : replaceSingleDelete bool
315 : // The path on the local filesystem where the initial state of the database
316 : // exists. Empty if the test run begins from an empty database state.
317 : initialStatePath string
318 : // A human-readable string describing the initial state of the database.
319 : // Empty if the test run begins from an empty database state.
320 : initialStateDesc string
321 : // Disable the block property collector, which may be used by block property
322 : // filters.
323 : disableBlockPropertyCollector bool
324 : // Enable the use of value blocks.
325 : enableValueBlocks bool
326 : // Disables value blocks in the sstables written for ingest.
327 : disableValueBlocksForIngestSSTables bool
328 : // Use DB.ApplyNoSyncWait for applies that want to sync the WAL.
329 : asyncApplyToDB bool
330 : // Enable the use of shared storage.
331 : sharedStorageEnabled bool
332 : sharedStorageFS remote.Storage
333 : // Enable the use of shared storage for external file ingestion.
334 : externalStorageEnabled bool
335 : externalStorageFS remote.Storage
336 : // Enables the use of shared replication in TestOptions.
337 : useSharedReplicate bool
338 : // Enable the secondary cache. Only effective if sharedStorageEnabled is
339 : // also true.
340 : secondaryCacheEnabled bool
341 : // If nonzero, enables the use of EventuallyFileOnlySnapshots for
342 : // newSnapshotOps that are keyspan-bounded. The set of which newSnapshotOps
343 : // are actually created as EventuallyFileOnlySnapshots is deterministically
344 : // derived from the seed and the operation index.
345 : seedEFOS uint64
346 : // If nonzero, enables the injection of random IO latency. The mechanics of
347 : // a Pebble operation can be very timing dependent, so artificial latency
348 : // can ensure a wide variety of mechanics are exercised. Additionally,
349 : // exercising some mechanics, such as WAL failover, requires IO latency.
350 : ioLatencyProbability float64
351 : ioLatencySeed int64
352 : ioLatencyMean time.Duration
353 : // Enables ingest splits. Stored here so it can be serialized, since
354 : // pebble.Options does not serialize this setting.
355 : ingestSplit bool
356 : // Enables operations that do excises. Note that a false value for this does
357 : // not guarantee the lack of excises, as useSharedReplicate can also cause
358 : // excises. However !useExcise && !useSharedReplicate can be used to guarantee
359 : // lack of excises.
360 : useExcise bool
361 : }
362 :
363 : // InitRemoteStorageFactory initializes Opts.Experimental.RemoteStorage.
364 1 : func (testOpts *TestOptions) InitRemoteStorageFactory() {
365 1 : if testOpts.sharedStorageEnabled || testOpts.externalStorageEnabled {
366 1 : m := make(map[remote.Locator]remote.Storage)
367 1 : if testOpts.sharedStorageEnabled {
368 1 : m[""] = testOpts.sharedStorageFS
369 1 : }
370 1 : if testOpts.externalStorageEnabled {
371 1 : m["external"] = testOpts.externalStorageFS
372 1 : }
373 1 : testOpts.Opts.Experimental.RemoteStorage = remote.MakeSimpleFactory(m)
374 : }
375 : }
376 :
377 : // CustomOption defines a custom option that configures the behavior of an
378 : // individual test run. Like all test options, custom options are serialized to
379 : // the OPTIONS file even if they're not options ordinarily understood by Pebble.
380 : type CustomOption interface {
381 : // Name returns the name of the custom option. This is the key under which
382 : // the option appears in the OPTIONS file, within the [TestOptions] stanza.
383 : Name() string
384 : // Value returns the value of the custom option, serialized as it should
385 : // appear within the OPTIONS file.
386 : Value() string
387 : // Close is run after the test database has been closed at the end of the
388 : // test as well as during restart operations within the test sequence. It's
389 : // passed a copy of the *pebble.Options. If the custom options hold on to
390 : // any external resources, Close should release them.
391 : Close(*pebble.Options) error
392 : // Open is run before the test runs and during a restart operation after the
393 : // test database has been closed and Close has been called. It's passed a
394 : // copy of the *pebble.Options. If the custom options must reacquire any
395 : // resources before the test continues, Open should do so.
396 : Open(*pebble.Options) error
397 :
398 : // TODO(jackson): provide additional hooks for custom options changing the
399 : // behavior of a run.
400 : }
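
// Editorial sketch, not part of the original file: a minimal CustomOption
// implementation. The option name "example_knob" and the type below are
// hypothetical; real custom options are defined outside this package and are
// wired in through the customOptionParsers argument of parseOptions, keyed by
// the option's Name.
type exampleKnobOption struct {
	value string
}

var _ CustomOption = (*exampleKnobOption)(nil)

func (o *exampleKnobOption) Name() string  { return "example_knob" }
func (o *exampleKnobOption) Value() string { return o.value }

// This sketch holds no external resources, so Close and Open are no-ops.
func (o *exampleKnobOption) Close(*pebble.Options) error { return nil }
func (o *exampleKnobOption) Open(*pebble.Options) error  { return nil }

// A corresponding customOptionParsers entry might look like:
//
//	map[string]func(string) (CustomOption, bool){
//		"example_knob": func(v string) (CustomOption, bool) {
//			return &exampleKnobOption{value: v}, true
//		},
//	}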
401 :
402 0 : func standardOptions() []*TestOptions {
403 0 : // The index labels are not strictly necessary, but they make it easier to
404 0 : // find which options correspond to a failure.
405 0 : stdOpts := []string{
406 0 : 0: "", // default options
407 0 : 1: `
408 0 : [Options]
409 0 : cache_size=1
410 0 : `,
411 0 : 2: `
412 0 : [Options]
413 0 : disable_wal=true
414 0 : `,
415 0 : 3: `
416 0 : [Options]
417 0 : l0_compaction_threshold=1
418 0 : `,
419 0 : 4: `
420 0 : [Options]
421 0 : l0_compaction_threshold=1
422 0 : l0_stop_writes_threshold=1
423 0 : `,
424 0 : 5: `
425 0 : [Options]
426 0 : lbase_max_bytes=1
427 0 : `,
428 0 : 6: `
429 0 : [Options]
430 0 : max_manifest_file_size=1
431 0 : `,
432 0 : 7: `
433 0 : [Options]
434 0 : max_open_files=1
435 0 : `,
436 0 : 8: `
437 0 : [Options]
438 0 : mem_table_size=2000
439 0 : `,
440 0 : 9: `
441 0 : [Options]
442 0 : mem_table_stop_writes_threshold=2
443 0 : `,
444 0 : 10: `
445 0 : [Options]
446 0 : wal_dir=data/wal
447 0 : `,
448 0 : 11: `
449 0 : [Level "0"]
450 0 : block_restart_interval=1
451 0 : `,
452 0 : 12: `
453 0 : [Level "0"]
454 0 : block_size=1
455 0 : `,
456 0 : 13: `
457 0 : [Level "0"]
458 0 : compression=NoCompression
459 0 : `,
460 0 : 14: `
461 0 : [Level "0"]
462 0 : index_block_size=1
463 0 : `,
464 0 : 15: `
465 0 : [Level "0"]
466 0 : target_file_size=1
467 0 : `,
468 0 : 16: `
469 0 : [Level "0"]
470 0 : filter_policy=none
471 0 : `,
472 0 : // 1GB
473 0 : 17: `
474 0 : [Options]
475 0 : bytes_per_sync=1073741824
476 0 : [TestOptions]
477 0 : strictfs=true
478 0 : `,
479 0 : 18: `
480 0 : [Options]
481 0 : max_concurrent_compactions=2
482 0 : `,
483 0 : 19: `
484 0 : [TestOptions]
485 0 : ingest_using_apply=true
486 0 : `,
487 0 : 20: `
488 0 : [TestOptions]
489 0 : replace_single_delete=true
490 0 : `,
491 0 : 21: `
492 0 : [TestOptions]
493 0 : use_disk=true
494 0 : `,
495 0 : 22: `
496 0 : [Options]
497 0 : max_writer_concurrency=2
498 0 : force_writer_parallelism=true
499 0 : `,
500 0 : 23: `
501 0 : [TestOptions]
502 0 : disable_block_property_collector=true
503 0 : `,
504 0 : 24: `
505 0 : [TestOptions]
506 0 : threads=1
507 0 : `,
508 0 : 25: `
509 0 : [TestOptions]
510 0 : enable_value_blocks=true
511 0 : `,
512 0 : 26: fmt.Sprintf(`
513 0 : [Options]
514 0 : format_major_version=%s
515 0 : `, newestFormatMajorVersionToTest),
516 0 : 27: fmt.Sprintf(`
517 0 : [Options]
518 0 : format_major_version=%s
519 0 : [TestOptions]
520 0 : shared_storage_enabled=true
521 0 : secondary_cache_enabled=true
522 0 : `, pebble.FormatMinForSharedObjects),
523 0 : 28: fmt.Sprintf(`
524 0 : [Options]
525 0 : format_major_version=%s
526 0 : [TestOptions]
527 0 : external_storage_enabled=true
528 0 : `, pebble.FormatSyntheticPrefixSuffix),
529 0 : 29: fmt.Sprintf(`
530 0 : [Options]
531 0 : format_major_version=%s
532 0 : [TestOptions]
533 0 : shared_storage_enabled=true
534 0 : external_storage_enabled=true
535 0 : secondary_cache_enabled=false
536 0 : `, pebble.FormatSyntheticPrefixSuffix),
537 0 : }
538 0 :
539 0 : opts := make([]*TestOptions, len(stdOpts))
540 0 : for i := range opts {
541 0 : opts[i] = defaultTestOptions()
542 0 : // NB: The standard options by definition can never include custom
543 0 : // options, so no need to propagate custom option parsers.
544 0 : if err := parseOptions(opts[i], stdOpts[i], nil /* custom option parsers */); err != nil {
545 0 : panic(err)
546 : }
547 : }
548 0 : return opts
549 : }
550 :
551 : // RandomOptions generates a random set of options, drawing randomness from
552 : // rng.
553 : func RandomOptions(
554 : rng *rand.Rand, customOptionParsers map[string]func(string) (CustomOption, bool),
555 0 : ) *TestOptions {
556 0 : testOpts := defaultTestOptions()
557 0 : opts := testOpts.Opts
558 0 :
559 0 : // There are some private options, which we don't want users to fiddle with.
560 0 : // There's no way to set them through the public interface; the only way is
561 0 : // through Parse.
562 0 : {
563 0 : var privateOpts bytes.Buffer
564 0 : fmt.Fprintln(&privateOpts, `[Options]`)
565 0 : if rng.Intn(3) == 0 /* 33% */ {
566 0 : fmt.Fprintln(&privateOpts, ` disable_delete_only_compactions=true`)
567 0 : }
568 0 : if rng.Intn(3) == 0 /* 33% */ {
569 0 : fmt.Fprintln(&privateOpts, ` disable_elision_only_compactions=true`)
570 0 : }
571 0 : if rng.Intn(5) == 0 /* 20% */ {
572 0 : fmt.Fprintln(&privateOpts, ` disable_lazy_combined_iteration=true`)
573 0 : }
574 0 : if privateOptsStr := privateOpts.String(); privateOptsStr != "[Options]\n" {
575 0 : parseOptions(testOpts, privateOptsStr, customOptionParsers)
576 0 : }
577 : }
578 :
579 0 : opts.BytesPerSync = 1 << uint(rng.Intn(28)) // 1B - 256MB
580 0 : opts.Cache = cache.New(1 << uint(rng.Intn(30))) // 1B - 1GB
581 0 : opts.DisableWAL = rng.Intn(2) == 0
582 0 : opts.FlushDelayDeleteRange = time.Millisecond * time.Duration(5*rng.Intn(245)) // 5-250ms
583 0 : opts.FlushDelayRangeKey = time.Millisecond * time.Duration(5*rng.Intn(245)) // 5-250ms
584 0 : opts.FlushSplitBytes = 1 << rng.Intn(20) // 1B - 1MB
585 0 : opts.FormatMajorVersion = minimumFormatMajorVersion
586 0 : n := int(newestFormatMajorVersionToTest - opts.FormatMajorVersion)
587 0 : opts.FormatMajorVersion += pebble.FormatMajorVersion(rng.Intn(n + 1))
588 0 : opts.Experimental.L0CompactionConcurrency = 1 + rng.Intn(4) // 1-4
589 0 : opts.Experimental.LevelMultiplier = 5 << rng.Intn(7) // 5 - 320
590 0 : opts.TargetByteDeletionRate = 1 << uint(20+rng.Intn(10)) // 1MB - 1GB
591 0 : opts.Experimental.ValidateOnIngest = rng.Intn(2) != 0
592 0 : opts.L0CompactionThreshold = 1 + rng.Intn(100) // 1 - 100
593 0 : opts.L0CompactionFileThreshold = 1 << rng.Intn(11) // 1 - 1024
594 0 : opts.L0StopWritesThreshold = 1 + rng.Intn(100) // 1 - 100
595 0 : if opts.L0StopWritesThreshold < opts.L0CompactionThreshold {
596 0 : opts.L0StopWritesThreshold = opts.L0CompactionThreshold
597 0 : }
598 0 : opts.LBaseMaxBytes = 1 << uint(rng.Intn(30)) // 1B - 1GB
599 0 : maxConcurrentCompactions := rng.Intn(3) + 1 // 1-3
600 0 : opts.MaxConcurrentCompactions = func() int {
601 0 : return maxConcurrentCompactions
602 0 : }
603 0 : opts.MaxManifestFileSize = 1 << uint(rng.Intn(30)) // 1B - 1GB
604 0 : opts.MemTableSize = 2 << (10 + uint(rng.Intn(16))) // 2KB - 256MB
605 0 : opts.MemTableStopWritesThreshold = 2 + rng.Intn(5) // 2 - 5
606 0 : if rng.Intn(2) == 0 {
607 0 : opts.WALDir = "data/wal"
608 0 : }
609 :
610 : // Half the time enable WAL failover.
611 0 : if rng.Intn(2) == 0 {
612 0 : // Use 10x longer durations when writing directly to FS; we don't want
613 0 : // WAL failover to trigger excessively frequently.
614 0 : referenceDur := time.Millisecond
615 0 : if testOpts.useDisk {
616 0 : referenceDur *= 10
617 0 : }
618 :
619 0 : scaleDuration := func(d time.Duration, minFactor, maxFactor float64) time.Duration {
620 0 : return time.Duration(float64(d) * (minFactor + rng.Float64()*(maxFactor-minFactor)))
621 0 : }
622 0 : unhealthyThreshold := expRandDuration(rng, 3*referenceDur, time.Second)
623 0 : healthyThreshold := expRandDuration(rng, 3*referenceDur, time.Second)
624 0 : healthyInterval := scaleDuration(healthyThreshold, 1.0, 10.0) // Between 1-10x the healthy threshold
625 0 : opts.WALFailover = &pebble.WALFailoverOptions{
626 0 : Secondary: wal.Dir{FS: vfs.Default, Dirname: "data/wal_secondary"},
627 0 : FailoverOptions: wal.FailoverOptions{
628 0 : PrimaryDirProbeInterval: scaleDuration(healthyThreshold, 0.10, 0.50), // Between 10-50% of the healthy threshold
629 0 : HealthyProbeLatencyThreshold: healthyThreshold,
630 0 : HealthyInterval: healthyInterval,
631 0 : UnhealthySamplingInterval: scaleDuration(unhealthyThreshold, 0.10, 0.50), // Between 10-50% of the unhealthy threshold
632 0 : UnhealthyOperationLatencyThreshold: func() time.Duration {
633 0 : return unhealthyThreshold
634 0 : },
635 : ElevatedWriteStallThresholdLag: expRandDuration(rng, 5*referenceDur, 2*time.Second),
636 : },
637 : }
638 : }
639 0 : if rng.Intn(4) == 0 {
640 0 : // Enable Writer parallelism for 25% of the random options. Setting
641 0 : // MaxWriterConcurrency to any value greater than or equal to 1 has the
642 0 : // same effect currently.
643 0 : opts.Experimental.MaxWriterConcurrency = 2
644 0 : opts.Experimental.ForceWriterParallelism = true
645 0 : }
646 0 : if rng.Intn(2) == 0 {
647 0 : opts.Experimental.DisableIngestAsFlushable = func() bool { return true }
648 : }
649 :
650 : // We either use no multilevel compactions, multilevel compactions with the
651 : // default (zero) additional propensity, or multilevel compactions with an
652 : // additional propensity to encourage more multilevel compactions than we
653 : // otherwise would.
654 0 : switch rng.Intn(3) {
655 0 : case 0:
656 0 : opts.Experimental.MultiLevelCompactionHeuristic = pebble.NoMultiLevel{}
657 0 : case 1:
658 0 : opts.Experimental.MultiLevelCompactionHeuristic = pebble.WriteAmpHeuristic{}
659 0 : default:
660 0 : opts.Experimental.MultiLevelCompactionHeuristic = pebble.WriteAmpHeuristic{
661 0 : AddPropensity: rng.Float64() * float64(rng.Intn(3)), // [0,3.0)
662 0 : AllowL0: rng.Intn(4) == 1, // 25% of the time
663 0 : }
664 : }
665 :
666 0 : var lopts pebble.LevelOptions
667 0 : lopts.BlockRestartInterval = 1 + rng.Intn(64) // 1 - 64
668 0 : lopts.BlockSize = 1 << uint(rng.Intn(24)) // 1 - 16MB
669 0 : lopts.BlockSizeThreshold = 50 + rng.Intn(50) // 50 - 100
670 0 : lopts.IndexBlockSize = 1 << uint(rng.Intn(24)) // 1 - 16MB
671 0 : lopts.TargetFileSize = 1 << uint(rng.Intn(28)) // 1 - 256MB
672 0 :
673 0 : // We either use no bloom filter, the default filter, or a filter with
674 0 : // randomized bits-per-key setting. We zero out the Filters map. It'll get
675 0 : // repopulated on EnsureDefaults accordingly.
676 0 : opts.Filters = nil
677 0 : switch rng.Intn(3) {
678 0 : case 0:
679 0 : lopts.FilterPolicy = nil
680 0 : case 1:
681 0 : lopts.FilterPolicy = bloom.FilterPolicy(10)
682 0 : default:
683 0 : lopts.FilterPolicy = newTestingFilterPolicy(1 << rng.Intn(5))
684 : }
685 :
686 : // We use either no compression, snappy compression or zstd compression.
687 0 : switch rng.Intn(3) {
688 0 : case 0:
689 0 : lopts.Compression = pebble.NoCompression
690 0 : case 1:
691 0 : lopts.Compression = pebble.ZstdCompression
692 0 : default:
693 0 : lopts.Compression = pebble.SnappyCompression
694 : }
695 0 : opts.Levels = []pebble.LevelOptions{lopts}
696 0 :
697 0 : // Explicitly disable disk-backed filesystems for the random configurations. The
698 0 : // single standard test configuration that uses a disk-backed FS is
699 0 : // sufficient.
700 0 : testOpts.useDisk = false
701 0 : testOpts.strictFS = rng.Intn(2) != 0 // Only relevant for MemFS.
702 0 : // 50% of the time, enable IO latency injection.
703 0 : if rng.Intn(2) == 0 {
704 0 : testOpts.ioLatencyMean = expRandDuration(rng, 3*time.Millisecond, time.Second)
705 0 : testOpts.ioLatencyProbability = 0.01 * rng.Float64() // 0-1%
706 0 : testOpts.ioLatencySeed = rng.Int63()
707 0 : }
708 0 : testOpts.Threads = rng.Intn(runtime.GOMAXPROCS(0)) + 1
709 0 : if testOpts.strictFS {
710 0 : opts.DisableWAL = false
711 0 : opts.FS = vfs.NewStrictMem()
712 0 : } else if !testOpts.useDisk {
713 0 : opts.FS = vfs.NewMem()
714 0 : }
715 : // Update the WALFailover's secondary to use the same FS. This isn't
716 : // strictly necessary (the WALFailover could use a separate FS), but it
717 : // ensures that when we save a copy of the test state to disk, we include the
718 : // secondary's WALs.
719 0 : if opts.WALFailover != nil {
720 0 : opts.WALFailover.Secondary.FS = opts.FS
721 0 : }
722 0 : testOpts.ingestUsingApply = rng.Intn(2) != 0
723 0 : testOpts.deleteSized = rng.Intn(2) != 0
724 0 : testOpts.replaceSingleDelete = rng.Intn(2) != 0
725 0 : testOpts.disableBlockPropertyCollector = rng.Intn(2) == 1
726 0 : if testOpts.disableBlockPropertyCollector {
727 0 : testOpts.Opts.BlockPropertyCollectors = nil
728 0 : }
729 0 : testOpts.enableValueBlocks = rng.Intn(2) != 0
730 0 : if testOpts.enableValueBlocks {
731 0 : testOpts.Opts.Experimental.EnableValueBlocks = func() bool { return true }
732 : }
733 0 : testOpts.disableValueBlocksForIngestSSTables = rng.Intn(2) == 0
734 0 : testOpts.asyncApplyToDB = rng.Intn(2) != 0
735 0 : // 20% of the time, enable shared storage.
736 0 : if rng.Intn(5) == 0 {
737 0 : testOpts.sharedStorageEnabled = true
738 0 : if testOpts.Opts.FormatMajorVersion < pebble.FormatMinForSharedObjects {
739 0 : testOpts.Opts.FormatMajorVersion = pebble.FormatMinForSharedObjects
740 0 : }
741 0 : testOpts.sharedStorageFS = remote.NewInMem()
742 0 : // If shared storage is enabled, pick between writing all files on shared
743 0 : // vs. lower levels only, 50% of the time.
744 0 : testOpts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedAll
745 0 : if rng.Intn(2) == 0 {
746 0 : testOpts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedLower
747 0 : }
748 : // If shared storage is enabled, enable the secondary cache 50% of the time.
749 0 : if rng.Intn(2) == 0 {
750 0 : testOpts.secondaryCacheEnabled = true
751 0 : // TODO(josh): Randomize various secondary cache settings.
752 0 : testOpts.Opts.Experimental.SecondaryCacheSizeBytes = 1024 * 1024 * 32 // 32 MBs
753 0 : }
754 : // 50% of the time, enable shared replication.
755 0 : testOpts.useSharedReplicate = rng.Intn(2) == 0
756 : }
757 :
758 : // 50% of the time, enable external storage.
759 0 : if rng.Intn(2) == 0 {
760 0 : testOpts.externalStorageEnabled = true
761 0 : if testOpts.Opts.FormatMajorVersion < pebble.FormatSyntheticPrefixSuffix {
762 0 : testOpts.Opts.FormatMajorVersion = pebble.FormatSyntheticPrefixSuffix
763 0 : }
764 0 : testOpts.externalStorageFS = remote.NewInMem()
765 : }
766 :
767 0 : testOpts.seedEFOS = rng.Uint64()
768 0 : // TODO(bilal): Enable ingestSplit when known bugs with virtual sstables
769 0 : // are addressed.
770 0 : //
771 0 : // testOpts.ingestSplit = rng.Intn(2) == 0
772 0 : opts.Experimental.IngestSplit = func() bool { return testOpts.ingestSplit }
773 0 : testOpts.useExcise = rng.Intn(2) == 0
774 0 : if testOpts.useExcise {
775 0 : if testOpts.Opts.FormatMajorVersion < pebble.FormatVirtualSSTables {
776 0 : testOpts.Opts.FormatMajorVersion = pebble.FormatVirtualSSTables
777 0 : }
778 : }
779 0 : testOpts.InitRemoteStorageFactory()
780 0 : testOpts.Opts.EnsureDefaults()
781 0 : return testOpts
782 : }
783 :
784 0 : func expRandDuration(rng *rand.Rand, meanDur, maxDur time.Duration) time.Duration {
785 0 : return min(maxDur, time.Duration(math.Round(rng.ExpFloat64()*float64(meanDur))))
786 0 : }
787 :
788 0 : func setupInitialState(dataDir string, testOpts *TestOptions) error {
789 0 : // Copy (vfs.Default,<initialStatePath>/data) to (testOpts.opts.FS,<dataDir>).
790 0 : ok, err := vfs.Clone(
791 0 : vfs.Default,
792 0 : testOpts.Opts.FS,
793 0 : vfs.Default.PathJoin(testOpts.initialStatePath, "data"),
794 0 : dataDir,
795 0 : vfs.CloneSync,
796 0 : vfs.CloneSkip(func(filename string) bool {
797 0 : // Skip the archive of historical files, any checkpoints created by
798 0 : // operations and files staged for ingest in tmp.
799 0 : b := filepath.Base(filename)
800 0 : return b == "archive" || b == "checkpoints" || b == "tmp"
801 0 : }))
802 0 : if err != nil {
803 0 : return err
804 0 : } else if !ok {
805 0 : return os.ErrNotExist
806 0 : }
807 :
808 : // Tests with wal_dir set store their WALs in a `wal` directory. The source
809 : // database (initialStatePath) could've had wal_dir set, or the current test
810 : // options (testOpts) could have wal_dir set, or both.
811 : //
812 : // If the test opts are not configured to use a WAL dir, we add the WAL dir
813 : // as a 'WAL recovery dir' so that we'll read any WALs in the directory in
814 : // Open.
815 0 : walRecoveryPath := testOpts.Opts.FS.PathJoin(dataDir, "wal")
816 0 : if testOpts.Opts.WALDir != "" {
817 0 : // If the test opts are configured to use a WAL dir, we add the data
818 0 : // directory itself as a 'WAL recovery dir' so that we'll read any WALs if
819 0 : // the previous test was writing them to the data directory.
820 0 : walRecoveryPath = dataDir
821 0 : }
822 0 : testOpts.Opts.WALRecoveryDirs = append(testOpts.Opts.WALRecoveryDirs, wal.Dir{
823 0 : FS: testOpts.Opts.FS,
824 0 : Dirname: walRecoveryPath,
825 0 : })
826 0 :
827 0 : // If the failover dir exists and the test opts are not configured to use
828 0 : // WAL failover, add the failover directory as a 'WAL recovery dir' in case
829 0 : // the previous test was configured to use failover.
830 0 : failoverDir := testOpts.Opts.FS.PathJoin(dataDir, "wal_secondary")
831 0 : if _, err := testOpts.Opts.FS.Stat(failoverDir); err == nil && testOpts.Opts.WALFailover == nil {
832 0 : testOpts.Opts.WALRecoveryDirs = append(testOpts.Opts.WALRecoveryDirs, wal.Dir{
833 0 : FS: testOpts.Opts.FS,
834 0 : Dirname: failoverDir,
835 0 : })
836 0 : }
837 0 : return nil
838 : }
839 :
840 : var blockPropertyCollectorConstructors = []func() pebble.BlockPropertyCollector{
841 : sstable.NewTestKeysBlockPropertyCollector,
842 : }
843 :
844 : // testingFilterPolicy is used to allow bloom filter policies with non-default
845 : // bits-per-key setting. It is necessary because the name of the production
846 : // filter policy is fixed (see bloom.FilterPolicy.Name()); we need to output a
847 : // custom policy name to the OPTIONS file that the test can then parse.
848 : type testingFilterPolicy struct {
849 : bloom.FilterPolicy
850 : }
851 :
852 : var _ pebble.FilterPolicy = (*testingFilterPolicy)(nil)
853 :
854 1 : func newTestingFilterPolicy(bitsPerKey int) *testingFilterPolicy {
855 1 : return &testingFilterPolicy{
856 1 : FilterPolicy: bloom.FilterPolicy(bitsPerKey),
857 1 : }
858 1 : }
859 :
860 : const testingFilterPolicyFmt = "testing_bloom_filter/bits_per_key=%d"
861 :
862 : // Name implements the pebble.FilterPolicy interface.
863 1 : func (t *testingFilterPolicy) Name() string {
864 1 : if t.FilterPolicy == 10 {
865 0 : return "rocksdb.BuiltinBloomFilter"
866 0 : }
867 1 : return fmt.Sprintf(testingFilterPolicyFmt, t.FilterPolicy)
868 : }
869 :
870 1 : func filterPolicyFromName(name string) (pebble.FilterPolicy, error) {
871 1 : switch name {
872 1 : case "none":
873 1 : return nil, nil
874 1 : case "rocksdb.BuiltinBloomFilter":
875 1 : return bloom.FilterPolicy(10), nil
876 : }
877 1 : var bitsPerKey int
878 1 : if _, err := fmt.Sscanf(name, testingFilterPolicyFmt, &bitsPerKey); err != nil {
879 0 : return nil, errors.Errorf("Invalid filter policy name '%s'", name)
880 0 : }
881 1 : return newTestingFilterPolicy(bitsPerKey), nil
882 : }
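
// Editorial sketch, not part of the original file: the custom filter policy
// name written to the OPTIONS file is expected to round-trip through
// filterPolicyFromName; for example, a 4 bits-per-key testing policy
// serializes as "testing_bloom_filter/bits_per_key=4". The function name is
// hypothetical.
func filterPolicyRoundTripExample() error {
	fp := newTestingFilterPolicy(4)
	parsed, err := filterPolicyFromName(fp.Name())
	if err != nil {
		return err
	}
	if parsed.Name() != fp.Name() {
		return errors.Errorf("filter policy %q did not round-trip", fp.Name())
	}
	return nil
}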