Line data Source code
1 : // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package metamorphic
6 :
7 : import (
8 : "bytes"
9 : "fmt"
10 : "os"
11 : "path/filepath"
12 : "runtime"
13 : "strconv"
14 : "strings"
15 : "time"
16 :
17 : "github.com/cockroachdb/errors"
18 : "github.com/cockroachdb/pebble"
19 : "github.com/cockroachdb/pebble/bloom"
20 : "github.com/cockroachdb/pebble/internal/cache"
21 : "github.com/cockroachdb/pebble/internal/testkeys"
22 : "github.com/cockroachdb/pebble/objstorage/remote"
23 : "github.com/cockroachdb/pebble/sstable"
24 : "github.com/cockroachdb/pebble/vfs"
25 : "golang.org/x/exp/rand"
26 : )
27 :
28 : const (
29 : // The metamorphic test exercises range keys, so we cannot use a
30 : // FormatMajorVersion older than pebble.FormatRangeKeys.
31 : minimumFormatMajorVersion = pebble.FormatRangeKeys
32 : // The format major version to use in the default options configurations. We
33 : // default to the last format major version of Cockroach 22.2 so we exercise
34 : // the runtime version ratcheting that a cluster upgrading to 23.1 would
35 : // experience. The randomized options may still use format major versions
36 : // that are less than defaultFormatMajorVersion but are at least
37 : // minimumFormatMajorVersion.
38 : defaultFormatMajorVersion = pebble.FormatPrePebblev1Marked
39 : // newestFormatMajorVersionToTest is the most recent format major version
40 : // the metamorphic tests should use. This may be greater than
41 : // pebble.FormatNewest when some format major versions are marked as
42 : // experimental.
43 : newestFormatMajorVersionToTest = pebble.FormatNewest
44 : )
45 :
46 : func parseOptions(
47 : opts *TestOptions, data string, customOptionParsers map[string]func(string) (CustomOption, bool),
48 2 : ) error {
49 2 : hooks := &pebble.ParseHooks{
50 2 : NewCache: pebble.NewCache,
51 2 : NewFilterPolicy: filterPolicyFromName,
52 2 : SkipUnknown: func(name, value string) bool {
53 2 : switch name {
54 0 : case "TestOptions":
55 0 : return true
56 2 : case "TestOptions.strictfs":
57 2 : opts.strictFS = true
58 2 : return true
59 2 : case "TestOptions.ingest_using_apply":
60 2 : opts.ingestUsingApply = true
61 2 : return true
62 2 : case "TestOptions.delete_sized":
63 2 : opts.deleteSized = true
64 2 : return true
65 2 : case "TestOptions.replace_single_delete":
66 2 : opts.replaceSingleDelete = true
67 2 : return true
68 2 : case "TestOptions.use_disk":
69 2 : opts.useDisk = true
70 2 : return true
71 0 : case "TestOptions.initial_state_desc":
72 0 : opts.initialStateDesc = value
73 0 : return true
74 0 : case "TestOptions.initial_state_path":
75 0 : opts.initialStatePath = value
76 0 : return true
77 2 : case "TestOptions.threads":
78 2 : v, err := strconv.Atoi(value)
79 2 : if err != nil {
80 0 : panic(err)
81 : }
82 2 : opts.threads = v
83 2 : return true
84 2 : case "TestOptions.disable_block_property_collector":
85 2 : v, err := strconv.ParseBool(value)
86 2 : if err != nil {
87 0 : panic(err)
88 : }
89 2 : opts.disableBlockPropertyCollector = v
90 2 : if v {
91 2 : opts.Opts.BlockPropertyCollectors = nil
92 2 : }
93 2 : return true
94 2 : case "TestOptions.enable_value_blocks":
95 2 : opts.enableValueBlocks = true
96 2 : opts.Opts.Experimental.EnableValueBlocks = func() bool { return true }
97 2 : return true
98 2 : case "TestOptions.async_apply_to_db":
99 2 : opts.asyncApplyToDB = true
100 2 : return true
101 2 : case "TestOptions.shared_storage_enabled":
102 2 : opts.sharedStorageEnabled = true
103 2 : opts.Opts.Experimental.RemoteStorage = remote.MakeSimpleFactory(map[remote.Locator]remote.Storage{
104 2 : "": remote.NewInMem(),
105 2 : })
106 2 : if opts.Opts.Experimental.CreateOnShared == remote.CreateOnSharedNone {
107 1 : opts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedAll
108 1 : }
109 2 : return true
110 2 : case "TestOptions.secondary_cache_enabled":
111 2 : opts.secondaryCacheEnabled = true
112 2 : opts.Opts.Experimental.SecondaryCacheSizeBytes = 1024 * 1024 * 32 // 32 MBs
113 2 : return true
114 2 : case "TestOptions.seed_efos":
115 2 : v, err := strconv.ParseUint(value, 10, 64)
116 2 : if err != nil {
117 0 : panic(err)
118 : }
119 2 : opts.seedEFOS = v
120 2 : return true
121 2 : case "TestOptions.ingest_split":
122 2 : opts.ingestSplit = true
123 2 : opts.Opts.Experimental.IngestSplit = func() bool {
124 2 : return true
125 2 : }
126 2 : return true
127 1 : default:
128 1 : if customOptionParsers == nil {
129 0 : return false
130 0 : }
131 1 : name = strings.TrimPrefix(name, "TestOptions.")
132 1 : if p, ok := customOptionParsers[name]; ok {
133 1 : if customOpt, ok := p(value); ok {
134 1 : opts.CustomOpts = append(opts.CustomOpts, customOpt)
135 1 : return true
136 1 : }
137 : }
138 0 : return false
139 : }
140 : },
141 : }
142 2 : err := opts.Opts.Parse(data, hooks)
143 2 : opts.Opts.EnsureDefaults()
144 2 : return err
145 : }
146 :
147 1 : func optionsToString(opts *TestOptions) string {
148 1 : var buf bytes.Buffer
149 1 : if opts.strictFS {
150 1 : fmt.Fprint(&buf, " strictfs=true\n")
151 1 : }
152 1 : if opts.ingestUsingApply {
153 1 : fmt.Fprint(&buf, " ingest_using_apply=true\n")
154 1 : }
155 1 : if opts.deleteSized {
156 1 : fmt.Fprint(&buf, " delete_sized=true\n")
157 1 : }
158 1 : if opts.replaceSingleDelete {
159 1 : fmt.Fprint(&buf, " replace_single_delete=true\n")
160 1 : }
161 1 : if opts.useDisk {
162 1 : fmt.Fprint(&buf, " use_disk=true\n")
163 1 : }
164 1 : if opts.initialStatePath != "" {
165 0 : fmt.Fprintf(&buf, " initial_state_path=%s\n", opts.initialStatePath)
166 0 : }
167 1 : if opts.initialStateDesc != "" {
168 0 : fmt.Fprintf(&buf, " initial_state_desc=%s\n", opts.initialStateDesc)
169 0 : }
170 1 : if opts.threads != 0 {
171 1 : fmt.Fprintf(&buf, " threads=%d\n", opts.threads)
172 1 : }
173 1 : if opts.disableBlockPropertyCollector {
174 1 : fmt.Fprintf(&buf, " disable_block_property_collector=%t\n", opts.disableBlockPropertyCollector)
175 1 : }
176 1 : if opts.enableValueBlocks {
177 1 : fmt.Fprintf(&buf, " enable_value_blocks=%t\n", opts.enableValueBlocks)
178 1 : }
179 1 : if opts.asyncApplyToDB {
180 1 : fmt.Fprint(&buf, " async_apply_to_db=true\n")
181 1 : }
182 1 : if opts.sharedStorageEnabled {
183 1 : fmt.Fprint(&buf, " shared_storage_enabled=true\n")
184 1 : }
185 1 : if opts.secondaryCacheEnabled {
186 1 : fmt.Fprint(&buf, " secondary_cache_enabled=true\n")
187 1 : }
188 1 : if opts.seedEFOS != 0 {
189 1 : fmt.Fprintf(&buf, " seed_efos=%d\n", opts.seedEFOS)
190 1 : }
191 1 : if opts.ingestSplit {
192 1 : fmt.Fprintf(&buf, " ingest_split=%v\n", opts.ingestSplit)
193 1 : }
194 1 : for _, customOpt := range opts.CustomOpts {
195 1 : fmt.Fprintf(&buf, " %s=%s\n", customOpt.Name(), customOpt.Value())
196 1 : }
197 :
198 1 : s := opts.Opts.String()
199 1 : if buf.Len() == 0 {
200 0 : return s
201 0 : }
202 1 : return s + "\n[TestOptions]\n" + buf.String()
203 : }
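// Illustrative sketch (not part of the original file): optionsToString and
// parseOptions are intended to round-trip, so a serialized TestOptions can be
// re-parsed into an equivalent configuration. The field values below are
// arbitrary examples.
//
//	original := defaultTestOptions()
//	original.threads = 4
//	original.deleteSized = true
//	serialized := optionsToString(original)
//
//	parsed := defaultTestOptions()
//	if err := parseOptions(parsed, serialized, nil /* custom option parsers */); err != nil {
//		panic(err)
//	}
//	// parsed.threads == 4 and parsed.deleteSized == true now hold.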
204 :
205 2 : func defaultTestOptions() *TestOptions {
206 2 : return &TestOptions{
207 2 : Opts: defaultOptions(),
208 2 : threads: 16,
209 2 : }
210 2 : }
211 :
212 2 : func defaultOptions() *pebble.Options {
213 2 : opts := &pebble.Options{
214 2 : Comparer: testkeys.Comparer,
215 2 : FS: vfs.NewMem(),
216 2 : FormatMajorVersion: defaultFormatMajorVersion,
217 2 : Levels: []pebble.LevelOptions{{
218 2 : FilterPolicy: bloom.FilterPolicy(10),
219 2 : }},
220 2 : BlockPropertyCollectors: blockPropertyCollectorConstructors,
221 2 : }
222 2 : return opts
223 2 : }
224 :
225 : // TestOptions describes the options configuring an individual run of the
226 : // metamorphic tests.
227 : type TestOptions struct {
228 : // Opts holds the *pebble.Options for the test.
229 : Opts *pebble.Options
230 : // CustomOpts holds custom test options that are defined outside of this
231 : // package.
232 : CustomOpts []CustomOption
233 : useDisk bool
234 : strictFS bool
235 : threads int
236 : // Use Batch.Apply rather than DB.Ingest.
237 : ingestUsingApply bool
238 : // Use Batch.DeleteSized rather than Batch.Delete.
239 : deleteSized bool
240 : // Replace a SINGLEDEL with a DELETE.
241 : replaceSingleDelete bool
242 : // The path on the local filesystem where the initial state of the database
243 : // exists. Empty if the test run begins from an empty database state.
244 : initialStatePath string
245 : // A human-readable string describing the initial state of the database.
246 : // Empty if the test run begins from an empty database state.
247 : initialStateDesc string
248 : // Disable the block property collector, which may be used by block property
249 : // filters.
250 : disableBlockPropertyCollector bool
251 : // Enable the use of value blocks.
252 : enableValueBlocks bool
253 : // Use DB.ApplyNoSyncWait for applies that want to sync the WAL.
254 : asyncApplyToDB bool
255 : // Enable the use of shared storage.
256 : sharedStorageEnabled bool
257 : // Enables the use of shared replication in TestOptions.
258 : useSharedReplicate bool
259 : // Enable the secondary cache. Only effective if sharedStorageEnabled is
260 : // also true.
261 : secondaryCacheEnabled bool
262 : // If nonzero, enables the use of EventuallyFileOnlySnapshots for
263 : // newSnapshotOps that are keyspan-bounded. The set of which newSnapshotOps
264 : // are actually created as EventuallyFileOnlySnapshots is deterministically
265 : // derived from the seed and the operation index.
266 : seedEFOS uint64
267 : // Enables ingest splits. Saved here for serialization as Options does not
268 : // serialize this.
269 : ingestSplit bool
270 : }
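// Illustrative sketch (not part of the original file): when any of the
// non-default fields above are set, optionsToString appends a [TestOptions]
// stanza to the serialized pebble OPTIONS, and parseOptions reads it back via
// the SkipUnknown hook. The particular values below are hypothetical.
//
//	[TestOptions]
//	  strictfs=true
//	  threads=8
//	  seed_efos=123456789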
271 :
272 : // CustomOption defines a custom option that configures the behavior of an
273 : // individual test run. Like all test options, custom options are serialized to
274 : // the OPTIONS file even if they're not options ordinarily understood by Pebble.
275 : type CustomOption interface {
276 : // Name returns the name of the custom option. This is the key under which
277 : // the option appears in the OPTIONS file, within the [TestOptions] stanza.
278 : Name() string
279 : // Value returns the value of the custom option, serialized as it should
280 : // appear within the OPTIONS file.
281 : Value() string
282 : // Close is run after the test database has been closed at the end of the
283 : // test as well as during restart operations within the test sequence. It's
284 : // passed a copy of the *pebble.Options. If the custom option holds on to
285 : // any external resources, Close should release them.
286 : Close(*pebble.Options) error
287 : // Open is run before the test runs and during a restart operation after the
288 : // test database has been closed and Close has been called. It's passed a
289 : // copy of the *pebble.Options. If the custom option must acquire any
290 : // resources before the test continues, Open should reacquire them.
291 : Open(*pebble.Options) error
292 :
293 : // TODO(jackson): provide additional hooks for custom options changing the
294 : // behavior of a run.
295 : }
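// Illustrative sketch (not part of the original file): a minimal CustomOption
// implementation and a matching entry for the customOptionParsers map accepted
// by parseOptions. The type name and the "my_option" key are hypothetical;
// real custom options are defined outside this package.
//
//	type simpleCustomOption struct {
//		name, value string
//	}
//
//	func (o simpleCustomOption) Name() string                { return o.name }
//	func (o simpleCustomOption) Value() string               { return o.value }
//	func (o simpleCustomOption) Close(*pebble.Options) error { return nil }
//	func (o simpleCustomOption) Open(*pebble.Options) error  { return nil }
//
//	parsers := map[string]func(string) (CustomOption, bool){
//		"my_option": func(value string) (CustomOption, bool) {
//			return simpleCustomOption{name: "my_option", value: value}, true
//		},
//	}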
296 :
297 1 : func standardOptions() []*TestOptions {
298 1 : // The index labels are not strictly necessary, but they make it easier to
299 1 : // find which options correspond to a failure.
300 1 : stdOpts := []string{
301 1 : 0: "", // default options
302 1 : 1: `
303 1 : [Options]
304 1 : cache_size=1
305 1 : `,
306 1 : 2: `
307 1 : [Options]
308 1 : disable_wal=true
309 1 : `,
310 1 : 3: `
311 1 : [Options]
312 1 : l0_compaction_threshold=1
313 1 : `,
314 1 : 4: `
315 1 : [Options]
316 1 : l0_compaction_threshold=1
317 1 : l0_stop_writes_threshold=1
318 1 : `,
319 1 : 5: `
320 1 : [Options]
321 1 : lbase_max_bytes=1
322 1 : `,
323 1 : 6: `
324 1 : [Options]
325 1 : max_manifest_file_size=1
326 1 : `,
327 1 : 7: `
328 1 : [Options]
329 1 : max_open_files=1
330 1 : `,
331 1 : 8: `
332 1 : [Options]
333 1 : mem_table_size=2000
334 1 : `,
335 1 : 9: `
336 1 : [Options]
337 1 : mem_table_stop_writes_threshold=2
338 1 : `,
339 1 : 10: `
340 1 : [Options]
341 1 : wal_dir=data/wal
342 1 : `,
343 1 : 11: `
344 1 : [Level "0"]
345 1 : block_restart_interval=1
346 1 : `,
347 1 : 12: `
348 1 : [Level "0"]
349 1 : block_size=1
350 1 : `,
351 1 : 13: `
352 1 : [Level "0"]
353 1 : compression=NoCompression
354 1 : `,
355 1 : 14: `
356 1 : [Level "0"]
357 1 : index_block_size=1
358 1 : `,
359 1 : 15: `
360 1 : [Level "0"]
361 1 : target_file_size=1
362 1 : `,
363 1 : 16: `
364 1 : [Level "0"]
365 1 : filter_policy=none
366 1 : `,
367 1 : // 1GB
368 1 : 17: `
369 1 : [Options]
370 1 : bytes_per_sync=1073741824
371 1 : [TestOptions]
372 1 : strictfs=true
373 1 : `,
374 1 : 18: `
375 1 : [Options]
376 1 : max_concurrent_compactions=2
377 1 : `,
378 1 : 19: `
379 1 : [TestOptions]
380 1 : ingest_using_apply=true
381 1 : `,
382 1 : 20: `
383 1 : [TestOptions]
384 1 : replace_single_delete=true
385 1 : `,
386 1 : 21: `
387 1 : [TestOptions]
388 1 : use_disk=true
389 1 : `,
390 1 : 22: `
391 1 : [Options]
392 1 : max_writer_concurrency=2
393 1 : force_writer_parallelism=true
394 1 : `,
395 1 : 23: `
396 1 : [TestOptions]
397 1 : disable_block_property_collector=true
398 1 : `,
399 1 : 24: `
400 1 : [TestOptions]
401 1 : threads=1
402 1 : `,
403 1 : 25: `
404 1 : [TestOptions]
405 1 : enable_value_blocks=true
406 1 : `,
407 1 : 26: fmt.Sprintf(`
408 1 : [Options]
409 1 : format_major_version=%s
410 1 : `, newestFormatMajorVersionToTest),
411 1 : 27: `
412 1 : [TestOptions]
413 1 : shared_storage_enabled=true
414 1 : secondary_cache_enabled=true
415 1 : `,
416 1 : }
417 1 :
418 1 : opts := make([]*TestOptions, len(stdOpts))
419 1 : for i := range opts {
420 1 : opts[i] = defaultTestOptions()
421 1 : // NB: The standard options by definition can never include custom
422 1 : // options, so no need to propagate custom option parsers.
423 1 : if err := parseOptions(opts[i], stdOpts[i], nil /* custom option parsers */); err != nil {
424 0 : panic(err)
425 : }
426 : }
427 1 : return opts
428 : }
429 :
430 : func randomOptions(
431 : rng *rand.Rand, customOptionParsers map[string]func(string) (CustomOption, bool),
432 1 : ) *TestOptions {
433 1 : testOpts := defaultTestOptions()
434 1 : opts := testOpts.Opts
435 1 :
436 1 : // There are some private options which we don't want users to fiddle with.
437 1 : // They cannot be set through the public interface; the only way to set them
438 1 : // is through Parse.
439 1 : {
440 1 : var privateOpts bytes.Buffer
441 1 : fmt.Fprintln(&privateOpts, `[Options]`)
442 1 : if rng.Intn(3) == 0 /* 33% */ {
443 1 : fmt.Fprintln(&privateOpts, ` disable_delete_only_compactions=true`)
444 1 : }
445 1 : if rng.Intn(3) == 0 /* 33% */ {
446 1 : fmt.Fprintln(&privateOpts, ` disable_elision_only_compactions=true`)
447 1 : }
448 1 : if rng.Intn(5) == 0 /* 20% */ {
449 1 : fmt.Fprintln(&privateOpts, ` disable_lazy_combined_iteration=true`)
450 1 : }
451 1 : if privateOptsStr := privateOpts.String(); privateOptsStr != `[Options]\n` {
452 1 : parseOptions(testOpts, privateOptsStr, customOptionParsers)
453 1 : }
454 : }
455 :
456 1 : opts.BytesPerSync = 1 << uint(rng.Intn(28)) // 1B - 256MB
457 1 : opts.Cache = cache.New(1 << uint(rng.Intn(30))) // 1B - 1GB
458 1 : opts.DisableWAL = rng.Intn(2) == 0
459 1 : opts.FlushDelayDeleteRange = time.Millisecond * time.Duration(5*rng.Intn(245)) // 0-1220ms, in 5ms steps
460 1 : opts.FlushDelayRangeKey = time.Millisecond * time.Duration(5*rng.Intn(245)) // 0-1220ms, in 5ms steps
461 1 : opts.FlushSplitBytes = 1 << rng.Intn(20) // 1B - 1MB
462 1 : opts.FormatMajorVersion = minimumFormatMajorVersion
463 1 : n := int(newestFormatMajorVersionToTest - opts.FormatMajorVersion)
464 1 : opts.FormatMajorVersion += pebble.FormatMajorVersion(rng.Intn(n + 1))
465 1 : opts.Experimental.L0CompactionConcurrency = 1 + rng.Intn(4) // 1-4
466 1 : opts.Experimental.LevelMultiplier = 5 << rng.Intn(7) // 5 - 320
467 1 : opts.TargetByteDeletionRate = 1 << uint(20+rng.Intn(10)) // 1MB - 1GB
468 1 : opts.Experimental.ValidateOnIngest = rng.Intn(2) != 0
469 1 : opts.L0CompactionThreshold = 1 + rng.Intn(100) // 1 - 100
470 1 : opts.L0CompactionFileThreshold = 1 << rng.Intn(11) // 1 - 1024
471 1 : opts.L0StopWritesThreshold = 1 + rng.Intn(100) // 1 - 100
472 1 : if opts.L0StopWritesThreshold < opts.L0CompactionThreshold {
473 1 : opts.L0StopWritesThreshold = opts.L0CompactionThreshold
474 1 : }
475 1 : opts.LBaseMaxBytes = 1 << uint(rng.Intn(30)) // 1B - 1GB
476 1 : maxConcurrentCompactions := rng.Intn(3) + 1 // 1-3
477 1 : opts.MaxConcurrentCompactions = func() int {
478 1 : return maxConcurrentCompactions
479 1 : }
480 1 : opts.MaxManifestFileSize = 1 << uint(rng.Intn(30)) // 1B - 1GB
481 1 : opts.MemTableSize = 2 << (10 + uint(rng.Intn(16))) // 2KB - 64MB
482 1 : opts.MemTableStopWritesThreshold = 2 + rng.Intn(5) // 2 - 6
483 1 : if rng.Intn(2) == 0 {
484 1 : opts.WALDir = "data/wal"
485 1 : }
486 1 : if rng.Intn(4) == 0 {
487 1 : // Enable Writer parallelism for 25% of the random options. Setting
488 1 : // MaxWriterConcurrency to any value greater than or equal to 1 has the
489 1 : // same effect currently.
490 1 : opts.Experimental.MaxWriterConcurrency = 2
491 1 : opts.Experimental.ForceWriterParallelism = true
492 1 : }
493 1 : if rng.Intn(2) == 0 {
494 1 : opts.Experimental.DisableIngestAsFlushable = func() bool { return true }
495 : }
496 1 : var lopts pebble.LevelOptions
497 1 : lopts.BlockRestartInterval = 1 + rng.Intn(64) // 1 - 64
498 1 : lopts.BlockSize = 1 << uint(rng.Intn(24)) // 1 - 16MB
499 1 : lopts.BlockSizeThreshold = 50 + rng.Intn(50) // 50 - 100
500 1 : lopts.IndexBlockSize = 1 << uint(rng.Intn(24)) // 1 - 16MB
501 1 : lopts.TargetFileSize = 1 << uint(rng.Intn(28)) // 1 - 256MB
502 1 :
503 1 : // We either use no bloom filter, the default filter, or a filter with
504 1 : // randomized bits-per-key setting. We zero out the Filters map. It'll get
505 1 : // repopulated on EnsureDefaults accordingly.
506 1 : opts.Filters = nil
507 1 : switch rng.Intn(3) {
508 1 : case 0:
509 1 : lopts.FilterPolicy = nil
510 1 : case 1:
511 1 : lopts.FilterPolicy = bloom.FilterPolicy(10)
512 1 : default:
513 1 : lopts.FilterPolicy = newTestingFilterPolicy(1 << rng.Intn(5))
514 : }
515 :
516 : // We use either no compression, snappy compression or zstd compression.
517 1 : switch rng.Intn(3) {
518 1 : case 0:
519 1 : lopts.Compression = pebble.NoCompression
520 1 : case 1:
521 1 : lopts.Compression = pebble.ZstdCompression
522 1 : default:
523 1 : lopts.Compression = pebble.SnappyCompression
524 : }
525 1 : opts.Levels = []pebble.LevelOptions{lopts}
526 1 :
527 1 : // Explicitly disable disk-backed FS's for the random configurations. The
528 1 : // single standard test configuration that uses a disk-backed FS is
529 1 : // sufficient.
530 1 : testOpts.useDisk = false
531 1 : testOpts.strictFS = rng.Intn(2) != 0 // Only relevant for MemFS.
532 1 : testOpts.threads = rng.Intn(runtime.GOMAXPROCS(0)) + 1
533 1 : if testOpts.strictFS {
534 1 : opts.DisableWAL = false
535 1 : }
536 1 : testOpts.ingestUsingApply = rng.Intn(2) != 0
537 1 : testOpts.deleteSized = rng.Intn(2) != 0
538 1 : testOpts.replaceSingleDelete = rng.Intn(2) != 0
539 1 : testOpts.disableBlockPropertyCollector = rng.Intn(2) == 1
540 1 : if testOpts.disableBlockPropertyCollector {
541 1 : testOpts.Opts.BlockPropertyCollectors = nil
542 1 : }
543 1 : testOpts.enableValueBlocks = opts.FormatMajorVersion >= pebble.FormatSSTableValueBlocks &&
544 1 : rng.Intn(2) != 0
545 1 : if testOpts.enableValueBlocks {
546 1 : testOpts.Opts.Experimental.EnableValueBlocks = func() bool { return true }
547 : }
548 1 : testOpts.asyncApplyToDB = rng.Intn(2) != 0
549 1 : // 20% of the time, enable shared storage.
550 1 : if rng.Intn(5) == 0 {
551 1 : testOpts.sharedStorageEnabled = true
552 1 : inMemShared := remote.NewInMem()
553 1 : testOpts.Opts.Experimental.RemoteStorage = remote.MakeSimpleFactory(map[remote.Locator]remote.Storage{
554 1 : "": inMemShared,
555 1 : })
556 1 : // If shared storage is enabled, pick between writing all files on shared
557 1 : // vs. lower levels only, 50% of the time.
558 1 : testOpts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedAll
559 1 : if rng.Intn(2) == 0 {
560 1 : testOpts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedLower
561 1 : }
562 : // If shared storage is enabled, enable the secondary cache 50% of the time.
563 1 : if rng.Intn(2) == 0 {
564 1 : testOpts.secondaryCacheEnabled = true
565 1 : // TODO(josh): Randomize various secondary cache settings.
566 1 : testOpts.Opts.Experimental.SecondaryCacheSizeBytes = 1024 * 1024 * 32 // 32 MBs
567 1 : }
568 : // 50% of the time, enable shared replication.
569 1 : testOpts.useSharedReplicate = rng.Intn(2) == 0
570 : }
571 1 : testOpts.seedEFOS = rng.Uint64()
572 1 : testOpts.ingestSplit = rng.Intn(2) == 0
573 1 : opts.Experimental.IngestSplit = func() bool { return testOpts.ingestSplit }
574 1 : testOpts.Opts.EnsureDefaults()
575 1 : return testOpts
576 : }
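// Illustrative sketch (not part of the original file): randomOptions is driven
// by a caller-provided golang.org/x/exp/rand source, so a run is reproducible
// from its seed. The seed value below is hypothetical.
//
//	rng := rand.New(rand.NewSource(1549813232))
//	testOpts := randomOptions(rng, nil /* custom option parsers */)
//	_ = optionsToString(testOpts) // serializable for later replay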
577 :
578 1 : func setupInitialState(dataDir string, testOpts *TestOptions) error {
579 1 : // Copy (vfs.Default,<initialStatePath>/data) to (testOpts.Opts.FS,<dataDir>).
580 1 : ok, err := vfs.Clone(
581 1 : vfs.Default,
582 1 : testOpts.Opts.FS,
583 1 : vfs.Default.PathJoin(testOpts.initialStatePath, "data"),
584 1 : dataDir,
585 1 : vfs.CloneSync,
586 1 : vfs.CloneSkip(func(filename string) bool {
587 1 : // Skip the archive of historical files, any checkpoints created by
588 1 : // operations, and files staged for ingest in tmp.
589 1 : b := filepath.Base(filename)
590 1 : return b == "archive" || b == "checkpoints" || b == "tmp"
591 1 : }))
592 1 : if err != nil {
593 0 : return err
594 1 : } else if !ok {
595 0 : return os.ErrNotExist
596 0 : }
597 :
598 : // Tests with wal_dir set store their WALs in a `wal` directory. The source
599 : // database (initialStatePath) could've had wal_dir set, or the current test
600 : // options (testOpts) could have wal_dir set, or both.
601 1 : fs := testOpts.Opts.FS
602 1 : walDir := fs.PathJoin(dataDir, "wal")
603 1 : if err := fs.MkdirAll(walDir, os.ModePerm); err != nil {
604 0 : return err
605 0 : }
606 :
607 : // Move <dataDir>/wal/*.log -> <dataDir>.
608 1 : src, dst := walDir, dataDir
609 1 : if testOpts.Opts.WALDir != "" {
610 0 : // Move <dataDir>/*.log -> <dataDir>/wal.
611 0 : src, dst = dst, src
612 0 : }
613 1 : return moveLogs(fs, src, dst)
614 : }
615 :
616 1 : func moveLogs(fs vfs.FS, srcDir, dstDir string) error {
617 1 : ls, err := fs.List(srcDir)
618 1 : if err != nil {
619 0 : return err
620 0 : }
621 1 : for _, f := range ls {
622 0 : if filepath.Ext(f) != ".log" {
623 0 : continue
624 : }
625 0 : src := fs.PathJoin(srcDir, f)
626 0 : dst := fs.PathJoin(dstDir, f)
627 0 : if err := fs.Rename(src, dst); err != nil {
628 0 : return err
629 0 : }
630 : }
631 1 : return nil
632 : }
633 :
634 : var blockPropertyCollectorConstructors = []func() pebble.BlockPropertyCollector{
635 : sstable.NewTestKeysBlockPropertyCollector,
636 : }
637 :
638 : // testingFilterPolicy is used to allow bloom filter policies with non-default
639 : // bits-per-key setting. It is necessary because the name of the production
640 : // filter policy is fixed (see bloom.FilterPolicy.Name()); we need to output a
641 : // custom policy name to the OPTIONS file that the test can then parse.
642 : type testingFilterPolicy struct {
643 : bloom.FilterPolicy
644 : }
645 :
646 : var _ pebble.FilterPolicy = (*testingFilterPolicy)(nil)
647 :
648 2 : func newTestingFilterPolicy(bitsPerKey int) *testingFilterPolicy {
649 2 : return &testingFilterPolicy{
650 2 : FilterPolicy: bloom.FilterPolicy(bitsPerKey),
651 2 : }
652 2 : }
653 :
654 : const testingFilterPolicyFmt = "testing_bloom_filter/bits_per_key=%d"
655 :
656 : // Name implements the pebble.FilterPolicy interface.
657 2 : func (t *testingFilterPolicy) Name() string {
658 2 : if t.FilterPolicy == 10 {
659 0 : return "rocksdb.BuiltinBloomFilter"
660 0 : }
661 2 : return fmt.Sprintf(testingFilterPolicyFmt, t.FilterPolicy)
662 : }
663 :
664 2 : func filterPolicyFromName(name string) (pebble.FilterPolicy, error) {
665 2 : switch name {
666 2 : case "none":
667 2 : return nil, nil
668 2 : case "rocksdb.BuiltinBloomFilter":
669 2 : return bloom.FilterPolicy(10), nil
670 : }
671 2 : var bitsPerKey int
672 2 : if _, err := fmt.Sscanf(name, testingFilterPolicyFmt, &bitsPerKey); err != nil {
673 0 : return nil, errors.Errorf("Invalid filter policy name '%s'", name)
674 0 : }
675 2 : return newTestingFilterPolicy(bitsPerKey), nil
676 : }
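// Illustrative sketch (not part of the original file): the testing filter
// policy embeds its bits-per-key setting in its name so that
// filterPolicyFromName can reconstruct the same policy when an OPTIONS file is
// re-parsed.
//
//	fp := newTestingFilterPolicy(4)
//	name := fp.Name() // "testing_bloom_filter/bits_per_key=4"
//	parsed, err := filterPolicyFromName(name)
//	if err != nil {
//		panic(err)
//	}
//	_ = parsed // *testingFilterPolicy with a 4 bits-per-key bloom filter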