1 : // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package metamorphic
6 :
7 : import (
8 : "bytes"
9 : "fmt"
10 : "math"
11 : "math/rand/v2"
12 : "os"
13 : "slices"
14 :
15 : "github.com/cockroachdb/pebble"
16 : "github.com/cockroachdb/pebble/internal/randvar"
17 : "github.com/cockroachdb/pebble/sstable"
18 : )
19 :
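// maxValueSize bounds the length of the random values generated for set,
// merge, and range key set operations.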
20 : const maxValueSize = 20
21 :
22 : type iterOpts struct {
23 : lower []byte
24 : upper []byte
25 : keyTypes uint32 // pebble.IterKeyType
26 : // maskSuffix may be set if keyTypes is IterKeyTypePointsAndRanges to
27 : // configure IterOptions.RangeKeyMasking.Suffix.
28 : maskSuffix []byte
29 :
30 : // If filterMax is >0, this iterator will filter out any keys that have
31 : // suffixes that don't fall within the range [filterMin,filterMax).
32 : // Additionally, the iterator will be constructed with a block-property
33 : // filter that filters out blocks accordingly. Not all OPTIONS hook up the
34 : // corresponding block property collector, so block-filtering may still be
35 : // effectively disabled in some runs. The iterator operations themselves,
36 : // however, will always skip past any points that should be filtered, to
37 : // ensure determinism.
38 : filterMin uint64
39 : filterMax uint64
40 :
41 : // see IterOptions.UseL6Filters.
42 : useL6Filters bool
43 :
44 : // NB: If adding or removing fields, ensure IsZero is in sync.
45 : }
46 :
47 1 : func (o iterOpts) IsZero() bool {
48 1 : return o.lower == nil && o.upper == nil && o.keyTypes == 0 &&
49 1 : o.maskSuffix == nil && o.filterMin == 0 && o.filterMax == 0 && !o.useL6Filters
50 1 : }
51 :
52 : // GenerateOps generates n random operations, drawing randomness from the
53 : // provided pseudorandom generator and using cfg to determine the distribution
54 : // of op types.
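//
// An illustrative sketch of a call site (cfg stands in for an OpConfig
// value, which is constructed outside this file, and seed is any uint64):
//
//	rng := rand.New(rand.NewPCG(0, seed)) // math/rand/v2
//	ops := GenerateOps(rng, 10000, cfg)
//
// All randomness is drawn from rng, so the same seed, count, and config
// reproduce the identical op sequence.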
55 1 : func GenerateOps(rng *rand.Rand, n uint64, cfg OpConfig) Ops {
56 1 : // Generate a new set of random ops. These are typically written to <dir>/ops
57 1 : // and read by the child processes when performing a test run.
58 1 : return generate(rng, n, cfg, newKeyManager(1 /* num instances */))
59 1 : }
60 :
61 : type generator struct {
62 : cfg OpConfig
63 : rng *rand.Rand
64 :
65 : init *initOp
66 : ops []op
67 :
68 : // keyManager tracks the state of keys at operation generation time.
69 : keyManager *keyManager
70 : keyGenerator *keyGenerator
71 : dbs objIDSlice
72 : // Unordered sets of object IDs for live objects. Used to randomly select an
73 : // object when generating an operation. There are 4 kinds of objects: DBs,
74 : // batches, iterators, and snapshots.
75 : //
76 : // liveBatches contains the live indexed and write-only batches.
77 : liveBatches objIDSlice
78 : // liveIters contains the live iterators.
79 : liveIters objIDSlice
80 : itersLastOpts map[objID]iterOpts
81 : // liveReaders contains the DBs, and any live indexed batches and snapshots.
82 : // The DBs are always at the start of the slice.
83 : liveReaders objIDSlice
84 : // liveSnapshots contains the live snapshots.
85 : liveSnapshots objIDSlice
86 : // liveWriters contains the DBs, and any live batches. The DBs are always at the start of the slice.
87 : liveWriters objIDSlice
88 : // externalObjects contains the external objects created.
89 : externalObjects objIDSlice
90 :
91 : // Maps used to find associated objects during generation. These maps are not
92 : // needed during test execution.
93 : //
94 : // batchID -> batch iters: used to keep track of the open iterators on an
95 : // indexed batch. The iter set value will also be indexed by the readers map.
96 : batches map[objID]objIDSet
97 : // iterID -> reader iters: used to keep track of all of the open
98 : // iterators. The iter set value will also be indexed by either the batches
99 : // or snapshots maps.
100 : iters map[objID]objIDSet
101 : // objectID -> db: used to keep track of the DB a batch, iter, or snapshot
102 : // was created on. It should be read through the dbIDForObj method.
103 : objDB map[objID]objID
104 : // readerID -> reader iters: used to keep track of the open iterators on a
105 : // reader. The iter set value will also be indexed by either the batches or
106 : // snapshots maps. This map is the union of batches and snapshots maps.
107 : readers map[objID]objIDSet
108 : // snapshotID -> snapshot iters: used to keep track of the open iterators on
109 : // a snapshot. The iter set value will also be indexed by the readers map.
110 : snapshots map[objID]objIDSet
111 : // snapshotID -> bounds of the snapshot: only populated for snapshots that
112 : // are constrained by bounds.
113 : snapshotBounds map[objID][]pebble.KeyRange
114 : // iterVisibleKeys is, for each iterator, the set of keys visible to it,
115 : // captured from its reader at iterator creation (or clone refresh) time.
116 : iterVisibleKeys map[objID][][]byte
117 : // iterReaderID is a map from an iterID to a readerID.
118 : iterReaderID map[objID]objID
119 : }
120 :
121 1 : func newGenerator(rng *rand.Rand, cfg OpConfig, km *keyManager) *generator {
122 1 : keyGenerator := newKeyGenerator(km, rng, cfg)
123 1 : g := &generator{
124 1 : cfg: cfg,
125 1 : rng: rng,
126 1 : init: &initOp{dbSlots: uint32(cfg.numInstances)},
127 1 : keyManager: km,
128 1 : keyGenerator: keyGenerator,
129 1 : liveReaders: objIDSlice{makeObjID(dbTag, 1)},
130 1 : liveWriters: objIDSlice{makeObjID(dbTag, 1)},
131 1 : dbs: objIDSlice{makeObjID(dbTag, 1)},
132 1 : objDB: make(map[objID]objID),
133 1 : batches: make(map[objID]objIDSet),
134 1 : iters: make(map[objID]objIDSet),
135 1 : readers: make(map[objID]objIDSet),
136 1 : snapshots: make(map[objID]objIDSet),
137 1 : snapshotBounds: make(map[objID][]pebble.KeyRange),
138 1 : itersLastOpts: make(map[objID]iterOpts),
139 1 : iterVisibleKeys: make(map[objID][][]byte),
140 1 : iterReaderID: make(map[objID]objID),
141 1 : }
142 1 : for i := 1; i < cfg.numInstances; i++ {
143 1 : g.liveReaders = append(g.liveReaders, makeObjID(dbTag, uint32(i+1)))
144 1 : g.liveWriters = append(g.liveWriters, makeObjID(dbTag, uint32(i+1)))
145 1 : g.dbs = append(g.dbs, makeObjID(dbTag, uint32(i+1)))
146 1 : }
147 : // Note that the initOp fields are populated during generation.
148 1 : g.ops = append(g.ops, g.init)
149 1 : return g
150 : }
151 :
152 1 : func generate(rng *rand.Rand, count uint64, cfg OpConfig, km *keyManager) []op {
153 1 : g := newGenerator(rng, cfg, km)
154 1 :
155 1 : opGenerators := []func(){
156 1 : OpBatchAbort: g.batchAbort,
157 1 : OpBatchCommit: g.batchCommit,
158 1 : OpDBCheckpoint: g.dbCheckpoint,
159 1 : OpDBCompact: g.dbCompact,
160 1 : OpDBDownload: g.dbDownload,
161 1 : OpDBFlush: g.dbFlush,
162 1 : OpDBRatchetFormatMajorVersion: g.dbRatchetFormatMajorVersion,
163 1 : OpDBRestart: g.dbRestart,
164 1 : OpIterClose: g.randIter(g.iterClose),
165 1 : OpIterFirst: g.randIter(g.iterFirst),
166 1 : OpIterLast: g.randIter(g.iterLast),
167 1 : OpIterNext: g.randIter(g.iterNext),
168 1 : OpIterNextWithLimit: g.randIter(g.iterNextWithLimit),
169 1 : OpIterNextPrefix: g.randIter(g.iterNextPrefix),
170 1 : OpIterCanSingleDelete: g.randIter(g.iterCanSingleDelete),
171 1 : OpIterPrev: g.randIter(g.iterPrev),
172 1 : OpIterPrevWithLimit: g.randIter(g.iterPrevWithLimit),
173 1 : OpIterSeekGE: g.randIter(g.iterSeekGE),
174 1 : OpIterSeekGEWithLimit: g.randIter(g.iterSeekGEWithLimit),
175 1 : OpIterSeekLT: g.randIter(g.iterSeekLT),
176 1 : OpIterSeekLTWithLimit: g.randIter(g.iterSeekLTWithLimit),
177 1 : OpIterSeekPrefixGE: g.randIter(g.iterSeekPrefixGE),
178 1 : OpIterSetBounds: g.randIter(g.iterSetBounds),
179 1 : OpIterSetOptions: g.randIter(g.iterSetOptions),
180 1 : OpNewBatch: g.newBatch,
181 1 : OpNewIndexedBatch: g.newIndexedBatch,
182 1 : OpNewIter: g.newIter,
183 1 : OpNewIterUsingClone: g.newIterUsingClone,
184 1 : OpNewSnapshot: g.newSnapshot,
185 1 : OpNewExternalObj: g.newExternalObj,
186 1 : OpReaderGet: g.readerGet,
187 1 : OpReplicate: g.replicate,
188 1 : OpSnapshotClose: g.snapshotClose,
189 1 : OpWriterApply: g.writerApply,
190 1 : OpWriterDelete: g.writerDelete,
191 1 : OpWriterDeleteRange: g.writerDeleteRange,
192 1 : OpWriterIngest: g.writerIngest,
193 1 : OpWriterIngestAndExcise: g.writerIngestAndExcise,
194 1 : OpWriterIngestExternalFiles: g.writerIngestExternalFiles,
195 1 : OpWriterLogData: g.writerLogData,
196 1 : OpWriterMerge: g.writerMerge,
197 1 : OpWriterRangeKeyDelete: g.writerRangeKeyDelete,
198 1 : OpWriterRangeKeySet: g.writerRangeKeySet,
199 1 : OpWriterRangeKeyUnset: g.writerRangeKeyUnset,
200 1 : OpWriterSet: g.writerSet,
201 1 : OpWriterSingleDelete: g.writerSingleDelete,
202 1 : }
203 1 :
204 1 : // TPCC-style deck of cards randomization. Every time the end of the deck is
205 1 : // reached, we shuffle the deck.
206 1 : deck := randvar.NewDeck(g.rng, cfg.ops[:]...)
207 1 :
208 1 : defer func() {
209 1 : if r := recover(); r != nil {
210 0 : fmt.Fprintln(os.Stderr, formatOps(g.ops))
211 0 : panic(r)
212 : }
213 : }()
214 1 : for i := uint64(0); i < count; i++ {
215 1 : opGenerators[deck.Int()]()
216 1 : }
217 :
218 1 : g.dbClose()
219 1 :
220 1 : computeDerivedFields(g.ops)
221 1 : return g.ops
222 : }
223 :
224 1 : func (g *generator) add(op op) {
225 1 : g.ops = append(g.ops, op)
226 1 : g.keyManager.update(op)
227 1 : }
228 :
229 : // prefixKeyRange generates a [start, end) pair consisting of two prefix keys.
230 1 : func (g *generator) prefixKeyRange() ([]byte, []byte) {
231 1 : keys := g.keyGenerator.UniqueKeys(2, func() []byte { return g.keyGenerator.RandPrefix(0.01) })
232 1 : return keys[0], keys[1]
233 : }
234 :
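// randKeyToSingleDelete returns a random key that is eligible for a single
// delete on the given object, or nil if there are no such keys.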
235 1 : func (g *generator) randKeyToSingleDelete(id objID) []byte {
236 1 : keys := g.keyManager.eligibleSingleDeleteKeys(id)
237 1 : length := len(keys)
238 1 : if length == 0 {
239 1 : return nil
240 1 : }
241 1 : return keys[g.rng.IntN(length)]
242 : }
243 :
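// resizeBuffer returns a buffer of length prefixLen+suffixLen, reusing buf's
// storage if it has sufficient capacity and allocating a new buffer otherwise.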
244 1 : func resizeBuffer(buf []byte, prefixLen, suffixLen int) []byte {
245 1 : if cap(buf) >= prefixLen+suffixLen {
246 0 : return buf[:prefixLen+suffixLen]
247 0 : }
248 1 : return make([]byte, prefixLen+suffixLen)
249 : }
250 :
251 1 : func (g *generator) newBatch() {
252 1 : batchID := makeObjID(batchTag, g.init.batchSlots)
253 1 : g.init.batchSlots++
254 1 : g.liveBatches = append(g.liveBatches, batchID)
255 1 : g.liveWriters = append(g.liveWriters, batchID)
256 1 : dbID := g.dbs.rand(g.rng)
257 1 : g.objDB[batchID] = dbID
258 1 :
259 1 : g.add(&newBatchOp{
260 1 : dbID: dbID,
261 1 : batchID: batchID,
262 1 : })
263 1 : }
264 :
265 1 : func (g *generator) newIndexedBatch() {
266 1 : batchID := makeObjID(batchTag, g.init.batchSlots)
267 1 : g.init.batchSlots++
268 1 : g.liveBatches = append(g.liveBatches, batchID)
269 1 : g.liveReaders = append(g.liveReaders, batchID)
270 1 : g.liveWriters = append(g.liveWriters, batchID)
271 1 :
272 1 : iters := make(objIDSet)
273 1 : g.batches[batchID] = iters
274 1 : g.readers[batchID] = iters
275 1 : dbID := g.dbs.rand(g.rng)
276 1 : g.objDB[batchID] = dbID
277 1 :
278 1 : g.add(&newIndexedBatchOp{
279 1 : dbID: dbID,
280 1 : batchID: batchID,
281 1 : })
282 1 : }
283 :
284 : // removeBatchFromGenerator removes a batch from the generator's live sets
285 : // without generating a closeOp for it, as not every batch that is removed
286 : // from the generator should be closed. For example, running a closeOp before
287 : // an ingestOp that contains the closed batch will cause an error.
288 1 : func (g *generator) removeBatchFromGenerator(batchID objID) {
289 1 : g.liveBatches.remove(batchID)
290 1 : iters := g.batches[batchID]
291 1 : delete(g.batches, batchID)
292 1 :
293 1 : if iters != nil {
294 1 : g.liveReaders.remove(batchID)
295 1 : delete(g.readers, batchID)
296 1 : }
297 1 : g.liveWriters.remove(batchID)
298 1 : for _, id := range iters.sorted() {
299 1 : g.liveIters.remove(id)
300 1 : delete(g.iters, id)
301 1 : g.add(&closeOp{objID: id})
302 1 : }
303 : }
304 :
305 1 : func (g *generator) batchAbort() {
306 1 : if len(g.liveBatches) == 0 {
307 1 : return
308 1 : }
309 :
310 1 : batchID := g.liveBatches.rand(g.rng)
311 1 : g.removeBatchFromGenerator(batchID)
312 1 :
313 1 : g.add(&closeOp{objID: batchID})
314 : }
315 :
316 1 : func (g *generator) batchCommit() {
317 1 : if len(g.liveBatches) == 0 {
318 1 : return
319 1 : }
320 :
321 1 : batchID := g.liveBatches.rand(g.rng)
322 1 : dbID := g.dbIDForObj(batchID)
323 1 : g.removeBatchFromGenerator(batchID)
324 1 :
325 1 : // The batch we're applying may contain single delete tombstones that when
326 1 : // applied to the writer result in nondeterminism in the deleted key. If
327 1 : // that's the case, we can restore determinism by first deleting the key
328 1 : // from the writer.
329 1 : //
330 1 : // Generating additional operations here is not ideal, but it simplifies
331 1 : // single delete invariants significantly.
332 1 : singleDeleteConflicts := g.keyManager.checkForSingleDelConflicts(batchID, dbID, false /* collapsed */)
333 1 : for _, conflict := range singleDeleteConflicts {
334 0 : g.add(&deleteOp{
335 0 : writerID: dbID,
336 0 : key: conflict,
337 0 : derivedDBID: dbID,
338 0 : })
339 0 : }
340 :
341 1 : g.add(&batchCommitOp{
342 1 : dbID: dbID,
343 1 : batchID: batchID,
344 1 : })
345 1 : g.add(&closeOp{objID: batchID})
347 : }
348 :
349 1 : func (g *generator) dbClose() {
350 1 : // Close any live iterators and snapshots, so that we can close the DB
351 1 : // cleanly.
352 1 : for len(g.liveIters) > 0 {
353 1 : g.randIter(g.iterClose)()
354 1 : }
355 1 : for len(g.liveSnapshots) > 0 {
356 1 : g.snapshotClose()
357 1 : }
358 1 : for len(g.liveBatches) > 0 {
359 0 : batchID := g.liveBatches[0]
360 0 : g.removeBatchFromGenerator(batchID)
361 0 : g.add(&closeOp{objID: batchID})
362 0 : }
363 1 : for len(g.dbs) > 0 {
364 1 : db := g.dbs[0]
365 1 : g.dbs = g.dbs[1:]
366 1 : g.add(&closeOp{objID: db})
367 1 : }
368 : }
369 :
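// dbCheckpoint generates a checkpointOp against a random DB, restricting it
// to a small (exponentially distributed) number of random key spans; zero
// spans leaves the spans slice nil, imposing no restriction.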
370 1 : func (g *generator) dbCheckpoint() {
371 1 : numSpans := g.expRandInt(1)
372 1 : var spans []pebble.CheckpointSpan
373 1 : if numSpans > 0 {
374 1 : spans = make([]pebble.CheckpointSpan, numSpans)
375 1 : }
376 1 : for i := range spans {
377 1 : start := g.keyGenerator.RandKey(0.01)
378 1 : end := g.keyGenerator.RandKey(0.01)
379 1 : if g.cmp(start, end) > 0 {
380 1 : start, end = end, start
381 1 : }
382 1 : spans[i].Start = start
383 1 : spans[i].End = end
384 : }
385 1 : dbID := g.dbs.rand(g.rng)
386 1 : g.add(&checkpointOp{
387 1 : dbID: dbID,
388 1 : spans: spans,
389 1 : })
390 : }
391 :
392 1 : func (g *generator) dbCompact() {
393 1 : // Generate new key(s) with a 1% probability.
394 1 : start := g.keyGenerator.RandKey(0.01)
395 1 : end := g.keyGenerator.RandKey(0.01)
396 1 : if g.cmp(start, end) > 0 {
397 1 : start, end = end, start
398 1 : }
399 1 : dbID := g.dbs.rand(g.rng)
400 1 : g.add(&compactOp{
401 1 : dbID: dbID,
402 1 : start: start,
403 1 : end: end,
404 1 : parallelize: g.rng.Float64() < 0.5,
405 1 : })
406 : }
407 :
408 1 : func (g *generator) dbDownload() {
409 1 : numSpans := 1 + g.expRandInt(1)
410 1 : spans := make([]pebble.DownloadSpan, numSpans)
411 1 : for i := range spans {
412 1 : keys := g.keyGenerator.UniqueKeys(2, func() []byte { return g.keyGenerator.RandKey(0.001) })
413 1 : start, end := keys[0], keys[1]
414 1 : spans[i].StartKey = start
415 1 : spans[i].EndKey = end
416 1 : spans[i].ViaBackingFileDownload = g.rng.IntN(2) == 0
417 : }
418 1 : dbID := g.dbs.rand(g.rng)
419 1 : g.add(&downloadOp{
420 1 : dbID: dbID,
421 1 : spans: spans,
422 1 : })
423 : }
424 :
425 1 : func (g *generator) dbFlush() {
426 1 : g.add(&flushOp{g.dbs.rand(g.rng)})
427 1 : }
428 :
429 1 : func (g *generator) dbRatchetFormatMajorVersion() {
430 1 : // Ratchet to a random format major version between the minimum the
431 1 : // metamorphic tests support and the newest. At runtime, the generated
432 1 : // version may be behind the database's format major version, in which case
433 1 : // RatchetFormatMajorVersion should deterministically error.
434 1 :
435 1 : dbID := g.dbs.rand(g.rng)
436 1 : n := int(newestFormatMajorVersionToTest - minimumFormatMajorVersion)
437 1 : vers := pebble.FormatMajorVersion(g.rng.IntN(n+1)) + minimumFormatMajorVersion
438 1 : g.add(&dbRatchetFormatMajorVersionOp{dbID: dbID, vers: vers})
439 1 : }
440 :
441 1 : func (g *generator) dbRestart() {
442 1 : // Close any live iterators and snapshots, so that we can close the DB
443 1 : // cleanly.
444 1 : dbID := g.dbs.rand(g.rng)
445 1 : for len(g.liveIters) > 0 {
446 1 : g.randIter(g.iterClose)()
447 1 : }
448 1 : for len(g.liveSnapshots) > 0 {
449 1 : g.snapshotClose()
450 1 : }
451 : // Close the batches.
452 1 : for len(g.liveBatches) > 0 {
453 1 : batchID := g.liveBatches[0]
454 1 : g.removeBatchFromGenerator(batchID)
455 1 : g.add(&closeOp{objID: batchID})
456 1 : }
457 1 : if len(g.liveReaders) != len(g.dbs) || len(g.liveWriters) != len(g.dbs) {
458 0 : panic(fmt.Sprintf("unexpected counts: liveReaders %d, liveWriters: %d",
459 0 : len(g.liveReaders), len(g.liveWriters)))
460 : }
461 1 : g.add(&dbRestartOp{dbID: dbID})
462 : }
463 :
464 : // maybeSetSnapshotIterBounds must be called whenever creating a new iterator or
465 : // modifying the bounds of an iterator. If the iterator is backed by a snapshot
466 : // that only guarantees consistency within a limited set of key spans, then the
467 : // iterator must set bounds within one of the snapshot's consistent key spans. It
468 : // returns true if the provided readerID is a bounded snapshot and bounds were
469 : // set.
470 1 : func (g *generator) maybeSetSnapshotIterBounds(readerID objID, opts *iterOpts) bool {
471 1 : snapBounds, isBoundedSnapshot := g.snapshotBounds[readerID]
472 1 : if !isBoundedSnapshot {
473 1 : return false
474 1 : }
475 : // Pick a random key range within one of the snapshot's key ranges.
476 1 : parentBounds := pickOneUniform(g.rng, snapBounds)
477 1 : // With 10% probability, use the parent start bound as-is.
478 1 : if g.rng.Float64() <= 0.1 {
479 1 : opts.lower = parentBounds.Start
480 1 : } else {
481 1 : opts.lower = g.keyGenerator.RandKeyInRange(0.1, parentBounds)
482 1 : }
483 : // With 10% probability, use the parent end bound as-is.
484 1 : if g.rng.Float64() <= 0.1 {
485 1 : opts.upper = parentBounds.End
486 1 : } else {
487 1 : opts.upper = g.keyGenerator.RandKeyInRange(0.1, pebble.KeyRange{
488 1 : Start: opts.lower,
489 1 : End: parentBounds.End,
490 1 : })
491 1 : }
492 1 : return true
493 : }
494 :
495 1 : func (g *generator) newIter() {
496 1 : iterID := makeObjID(iterTag, g.init.iterSlots)
497 1 : g.init.iterSlots++
498 1 : g.liveIters = append(g.liveIters, iterID)
499 1 :
500 1 : readerID := g.liveReaders.rand(g.rng)
501 1 : if iters := g.readers[readerID]; iters != nil {
502 1 : iters[iterID] = struct{}{}
503 1 : g.iters[iterID] = iters
504 1 : //lint:ignore SA9003 - readability
505 1 : } else {
506 1 : // NB: the DB object does not track its open iterators because it never
507 1 : // closes.
508 1 : }
509 1 : g.iterReaderID[iterID] = readerID
510 1 : dbID := g.deriveDB(iterID)
511 1 :
512 1 : var opts iterOpts
513 1 : if !g.maybeSetSnapshotIterBounds(readerID, &opts) {
514 1 : // Generate lower/upper bounds with a 10% probability.
515 1 : if g.rng.Float64() <= 0.1 {
516 1 : // Generate a new key with a .1% probability.
517 1 : opts.lower = g.keyGenerator.RandKey(0.001)
518 1 : }
519 1 : if g.rng.Float64() <= 0.1 {
520 1 : // Generate a new key with a .1% probability.
521 1 : opts.upper = g.keyGenerator.RandKey(0.001)
522 1 : }
523 1 : if g.cmp(opts.lower, opts.upper) > 0 {
524 1 : opts.lower, opts.upper = opts.upper, opts.lower
525 1 : }
526 : }
527 1 : opts.keyTypes, opts.maskSuffix = g.randKeyTypesAndMask()
528 1 :
529 1 : // With 10% probability, enable automatic filtering of keys with suffixes
530 1 : // not in the provided range. This filtering occurs both through
531 1 : // block-property filtering and explicitly within the iterator operations to
532 1 : // ensure determinism.
533 1 : if g.rng.Float64() <= 0.1 {
534 1 : opts.filterMin = uint64(g.keyGenerator.UniformSuffixInt() + 1)
535 1 : opts.filterMax = uint64(g.keyGenerator.UniformSuffixInt() + 1)
536 1 : if opts.filterMin > opts.filterMax {
537 1 : opts.filterMin, opts.filterMax = opts.filterMax, opts.filterMin
538 1 : } else if opts.filterMin == opts.filterMax {
539 1 : opts.filterMax++
540 1 : }
541 : }
542 :
543 : // Enable L6 filters with a 10% probability.
544 1 : if g.rng.Float64() <= 0.1 {
545 1 : opts.useL6Filters = true
546 1 : }
547 :
548 1 : g.itersLastOpts[iterID] = opts
549 1 : g.iterVisibleKeys[iterID] = g.keyManager.getSetOfVisibleKeys(readerID)
550 1 : g.iterReaderID[iterID] = readerID
551 1 : g.add(&newIterOp{
552 1 : readerID: readerID,
553 1 : iterID: iterID,
554 1 : iterOpts: opts,
555 1 : derivedDBID: dbID,
556 1 : })
557 : }
558 :
559 1 : func (g *generator) randKeyTypesAndMask() (keyTypes uint32, maskSuffix []byte) {
560 1 : // Iterate over different key types.
561 1 : p := g.rng.Float64()
562 1 : switch {
563 1 : case p < 0.2: // 20% probability
564 1 : keyTypes = uint32(pebble.IterKeyTypePointsOnly)
565 1 : case p < 0.8: // 60% probability
566 1 : keyTypes = uint32(pebble.IterKeyTypePointsAndRanges)
567 1 : // With 50% probability, enable masking.
568 1 : if g.rng.IntN(2) == 1 {
569 1 : maskSuffix = g.keyGenerator.UniformSuffix()
570 1 : }
571 1 : default: // 20% probability
572 1 : keyTypes = uint32(pebble.IterKeyTypeRangesOnly)
573 : }
574 1 : return keyTypes, maskSuffix
575 : }
576 :
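// deriveDB resolves the DB that ultimately backs the given object (following
// an iterator to its reader if necessary), records the association in objDB,
// and returns the DB's objID.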
577 1 : func (g *generator) deriveDB(readerID objID) objID {
578 1 : dbParentID := readerID
579 1 : if readerID.tag() == iterTag {
580 1 : dbParentID = g.iterReaderID[readerID]
581 1 : }
582 1 : if dbParentID.tag() != dbTag {
583 1 : dbParentID = g.dbIDForObj(dbParentID)
584 1 : }
585 1 : g.objDB[readerID] = dbParentID
586 1 : return dbParentID
587 : }
588 :
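// dbIDForObj returns the DB previously associated with objID, panicking if
// no association has been recorded.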
589 1 : func (g *generator) dbIDForObj(objID objID) objID {
590 1 : if g.objDB[objID] == 0 {
591 0 : panic(fmt.Sprintf("object %s has no associated DB", objID))
592 : }
593 1 : return g.objDB[objID]
594 : }
595 :
596 1 : func (g *generator) newIterUsingClone() {
597 1 : if len(g.liveIters) == 0 {
598 1 : return
599 1 : }
600 1 : existingIterID := g.liveIters.rand(g.rng)
601 1 : iterID := makeObjID(iterTag, g.init.iterSlots)
602 1 : g.init.iterSlots++
603 1 : g.liveIters = append(g.liveIters, iterID)
604 1 : if iters := g.iters[existingIterID]; iters != nil {
605 1 : iters[iterID] = struct{}{}
606 1 : g.iters[iterID] = iters
607 1 : //lint:ignore SA9003 - readability
608 1 : } else {
609 1 : // NB: the DB object does not track its open iterators because it never
610 1 : // closes.
611 1 : }
612 1 : readerID := g.iterReaderID[existingIterID]
613 1 : g.iterReaderID[iterID] = readerID
614 1 : g.deriveDB(iterID)
615 1 :
616 1 : var refreshBatch bool
617 1 : visibleKeys := g.iterVisibleKeys[existingIterID]
618 1 : if readerID.tag() == batchTag {
619 0 : if refreshBatch = g.rng.IntN(2) == 1; refreshBatch {
620 0 : visibleKeys = g.keyManager.getSetOfVisibleKeys(readerID)
621 0 : }
622 : }
623 :
624 1 : opts := g.itersLastOpts[existingIterID]
625 1 : // With 50% probability, consider modifying the iterator options used by the
626 1 : // clone.
627 1 : if g.rng.IntN(2) == 1 {
628 1 : g.maybeMutateOptions(readerID, &opts)
629 1 : }
630 1 : g.itersLastOpts[iterID] = opts
631 1 :
632 1 : // Copy the visible keys from the existing iterator.
633 1 : g.iterVisibleKeys[iterID] = visibleKeys
634 1 : g.iterReaderID[iterID] = g.iterReaderID[existingIterID]
635 1 : g.add(&newIterUsingCloneOp{
636 1 : existingIterID: existingIterID,
637 1 : iterID: iterID,
638 1 : refreshBatch: refreshBatch,
639 1 : iterOpts: opts,
640 1 : derivedReaderID: readerID,
641 1 : })
642 : }
643 :
644 1 : func (g *generator) iterClose(iterID objID) {
645 1 : g.liveIters.remove(iterID)
646 1 : if readerIters, ok := g.iters[iterID]; ok {
647 1 : delete(g.iters, iterID)
648 1 : delete(readerIters, iterID)
649 1 : }
650 :
651 1 : g.add(&closeOp{objID: iterID})
652 : }
653 :
654 1 : func (g *generator) iterSetBounds(iterID objID) {
655 1 : iterLastOpts := g.itersLastOpts[iterID]
656 1 : newOpts := iterLastOpts
657 1 : // TODO(jackson): The logic to increase the probability of advancing bounds
658 1 : // monotonically only applies if the snapshot is not bounded. Refactor to
659 1 : // allow bounded snapshots to benefit too, when possible.
660 1 : if !g.maybeSetSnapshotIterBounds(g.iterReaderID[iterID], &newOpts) {
661 1 : var lower, upper []byte
662 1 : genLower := g.rng.Float64() <= 0.9
663 1 : genUpper := g.rng.Float64() <= 0.9
664 1 : // When one of ensureLowerGE, ensureUpperLE is true, the new bounds
665 1 : // don't overlap with the previous bounds.
666 1 : var ensureLowerGE, ensureUpperLE bool
667 1 : if genLower && iterLastOpts.upper != nil && g.rng.Float64() <= 0.9 {
668 1 : ensureLowerGE = true
669 1 : }
670 1 : if (!ensureLowerGE || g.rng.Float64() < 0.5) && genUpper && iterLastOpts.lower != nil {
671 1 : ensureUpperLE = true
672 1 : ensureLowerGE = false
673 1 : }
674 1 : attempts := 0
675 1 : for {
676 1 : attempts++
677 1 : if genLower {
678 1 : // Generate a new key with a .1% probability.
679 1 : lower = g.keyGenerator.RandKey(0.001)
680 1 : }
681 1 : if genUpper {
682 1 : // Generate a new key with a .1% probability.
683 1 : upper = g.keyGenerator.RandKey(0.001)
684 1 : }
685 1 : if g.cmp(lower, upper) > 0 {
686 1 : lower, upper = upper, lower
687 1 : }
688 1 : if ensureLowerGE && g.cmp(iterLastOpts.upper, lower) > 0 {
689 1 : if attempts < 25 {
690 1 : continue
691 : }
692 1 : lower = iterLastOpts.upper
693 1 : upper = lower
694 1 : break
695 : }
696 1 : if ensureUpperLE && g.cmp(upper, iterLastOpts.lower) > 0 {
697 1 : if attempts < 25 {
698 1 : continue
699 : }
700 1 : upper = iterLastOpts.lower
701 1 : lower = upper
702 1 : break
703 : }
704 1 : break
705 : }
706 1 : newOpts.lower = lower
707 1 : newOpts.upper = upper
708 : }
709 1 : g.itersLastOpts[iterID] = newOpts
710 1 : g.add(&iterSetBoundsOp{
711 1 : iterID: iterID,
712 1 : lower: newOpts.lower,
713 1 : upper: newOpts.upper,
714 1 : })
715 1 : // Additionally seek the iterator in a manner consistent with the bounds,
716 1 : // and do some steps (Next/Prev). The seeking exercises typical
717 1 : // CockroachDB behavior when using iterators and the steps are trying to
718 1 : // stress the region near the bounds. Ideally, we should not do this as
719 1 : // part of generating a single op, but this is easier than trying to
720 1 : // control future op generation via generator state.
721 1 : doSeekLT := newOpts.upper != nil && g.rng.Float64() < 0.5
722 1 : doSeekGE := newOpts.lower != nil && g.rng.Float64() < 0.5
723 1 : if doSeekLT && doSeekGE {
724 1 : // Pick the seek.
725 1 : if g.rng.Float64() < 0.5 {
726 1 : doSeekGE = false
727 1 : } else {
728 1 : doSeekLT = false
729 1 : }
730 : }
731 1 : if doSeekLT {
732 1 : g.add(&iterSeekLTOp{
733 1 : iterID: iterID,
734 1 : key: newOpts.upper,
735 1 : derivedReaderID: g.iterReaderID[iterID],
736 1 : })
737 1 : if g.rng.Float64() < 0.5 {
738 1 : g.iterNext(iterID)
739 1 : }
740 1 : if g.rng.Float64() < 0.5 {
741 1 : g.iterNext(iterID)
742 1 : }
743 1 : if g.rng.Float64() < 0.5 {
744 1 : g.iterPrev(iterID)
745 1 : }
746 1 : } else if doSeekGE {
747 1 : g.add(&iterSeekGEOp{
748 1 : iterID: iterID,
749 1 : key: newOpts.lower,
750 1 : derivedReaderID: g.iterReaderID[iterID],
751 1 : })
752 1 : if g.rng.Float64() < 0.5 {
753 1 : g.iterPrev(iterID)
754 1 : }
755 1 : if g.rng.Float64() < 0.5 {
756 1 : g.iterPrev(iterID)
757 1 : }
758 1 : if g.rng.Float64() < 0.5 {
759 1 : g.iterNext(iterID)
760 1 : }
761 : }
762 : }
763 :
764 1 : func (g *generator) iterSetOptions(iterID objID) {
765 1 : opts := g.itersLastOpts[iterID]
766 1 : g.maybeMutateOptions(g.iterReaderID[iterID], &opts)
767 1 : g.itersLastOpts[iterID] = opts
768 1 : g.add(&iterSetOptionsOp{
769 1 : iterID: iterID,
770 1 : iterOpts: opts,
771 1 : derivedReaderID: g.iterReaderID[iterID],
772 1 : })
773 1 :
774 1 : // Additionally, perform a random absolute positioning operation. The
775 1 : // SetOptions contract requires one before the next relative positioning
776 1 : // operation. Ideally, we should not do this as part of generating a single
777 1 : // op, but this is easier than trying to control future op generation via
778 1 : // generator state.
779 1 : pickOneUniform(
780 1 : g.rng,
781 1 : []func(objID){
782 1 : g.iterFirst,
783 1 : g.iterLast,
784 1 : g.iterSeekGE,
785 1 : g.iterSeekGEWithLimit,
786 1 : g.iterSeekPrefixGE,
787 1 : g.iterSeekLT,
788 1 : g.iterSeekLTWithLimit,
789 1 : },
790 1 : )(iterID)
791 1 : }
792 :
793 1 : func (g *generator) iterSeekGE(iterID objID) {
794 1 : g.add(&iterSeekGEOp{
795 1 : iterID: iterID,
796 1 : key: g.keyGenerator.RandKey(0.001), // 0.1% new keys
797 1 : derivedReaderID: g.iterReaderID[iterID],
798 1 : })
799 1 : }
800 :
801 1 : func (g *generator) iterSeekGEWithLimit(iterID objID) {
802 1 : // 0.1% new keys
803 1 : key, limit := g.keyGenerator.RandKey(0.001), g.keyGenerator.RandKey(0.001)
804 1 : if g.cmp(key, limit) > 0 {
805 1 : key, limit = limit, key
806 1 : }
807 1 : g.add(&iterSeekGEOp{
808 1 : iterID: iterID,
809 1 : key: key,
810 1 : limit: limit,
811 1 : derivedReaderID: g.iterReaderID[iterID],
812 1 : })
813 : }
814 :
815 1 : func (g *generator) iterSeekPrefixGE(iterID objID) {
816 1 : // Purely random key selection is unlikely to pick a key with any visible
817 1 : // versions, especially if we don't take iterator bounds into account. We
818 1 : // try to err towards picking a key within bounds that contains a value
819 1 : // visible to the iterator.
820 1 : lower := g.itersLastOpts[iterID].lower
821 1 : upper := g.itersLastOpts[iterID].upper
822 1 : var key []byte
823 1 : if g.rng.IntN(5) >= 1 {
824 1 : visibleKeys := g.iterVisibleKeys[iterID]
825 1 : if lower != nil {
826 1 : i, _ := slices.BinarySearchFunc(visibleKeys, lower, g.cmp)
827 1 : visibleKeys = visibleKeys[i:]
828 1 : }
829 1 : if upper != nil {
830 1 : i, _ := slices.BinarySearchFunc(visibleKeys, upper, g.cmp)
831 1 : visibleKeys = visibleKeys[:i]
832 1 : }
833 1 : if len(visibleKeys) > 0 {
834 1 : key = visibleKeys[g.rng.IntN(len(visibleKeys))]
835 1 : }
836 : }
837 1 : if key == nil {
838 1 : key = g.keyGenerator.RandKey(0) // 0% new keys
839 1 : }
840 : // Sometimes limit the key to just the prefix.
841 1 : if g.rng.IntN(3) == 1 {
842 1 : key = g.keyManager.comparer.Split.Prefix(key)
843 1 : }
844 1 : g.add(&iterSeekPrefixGEOp{
845 1 : iterID: iterID,
846 1 : key: key,
847 1 : derivedReaderID: g.iterReaderID[iterID],
848 1 : })
849 : }
850 :
851 1 : func (g *generator) iterSeekLT(iterID objID) {
852 1 : g.add(&iterSeekLTOp{
853 1 : iterID: iterID,
854 1 : key: g.keyGenerator.RandKey(0.001), // 0.1% new keys
855 1 : derivedReaderID: g.iterReaderID[iterID],
856 1 : })
857 1 : }
858 :
859 1 : func (g *generator) iterSeekLTWithLimit(iterID objID) {
860 1 : // 0.1% new keys
861 1 : key, limit := g.keyGenerator.RandKey(0.001), g.keyGenerator.RandKey(0.001)
862 1 : if g.cmp(limit, key) > 0 {
863 1 : key, limit = limit, key
864 1 : }
865 1 : g.add(&iterSeekLTOp{
866 1 : iterID: iterID,
867 1 : key: key,
868 1 : limit: limit,
869 1 : derivedReaderID: g.iterReaderID[iterID],
870 1 : })
871 : }
872 :
873 : // randIter performs partial func application ("currying"), returning a new
874 : // function that supplies the given func with a random iterator.
875 1 : func (g *generator) randIter(gen func(objID)) func() {
876 1 : return func() {
877 1 : if len(g.liveIters) == 0 {
878 1 : return
879 1 : }
880 1 : gen(g.liveIters.rand(g.rng))
881 : }
882 : }
883 :
884 1 : func (g *generator) iterFirst(iterID objID) {
885 1 : g.add(&iterFirstOp{
886 1 : iterID: iterID,
887 1 : derivedReaderID: g.iterReaderID[iterID],
888 1 : })
889 1 : }
890 :
891 1 : func (g *generator) iterLast(iterID objID) {
892 1 : g.add(&iterLastOp{
893 1 : iterID: iterID,
894 1 : derivedReaderID: g.iterReaderID[iterID],
895 1 : })
896 1 : }
897 :
898 1 : func (g *generator) iterNext(iterID objID) {
899 1 : g.add(&iterNextOp{
900 1 : iterID: iterID,
901 1 : derivedReaderID: g.iterReaderID[iterID],
902 1 : })
903 1 : }
904 :
905 1 : func (g *generator) iterPrev(iterID objID) {
906 1 : g.add(&iterPrevOp{
907 1 : iterID: iterID,
908 1 : derivedReaderID: g.iterReaderID[iterID],
909 1 : })
910 1 : }
911 :
912 1 : func (g *generator) iterNextWithLimit(iterID objID) {
913 1 : g.add(&iterNextOp{
914 1 : iterID: iterID,
915 1 : limit: g.keyGenerator.RandKey(0.001), // 0.1% new keys
916 1 : derivedReaderID: g.iterReaderID[iterID],
917 1 : })
918 1 : }
919 :
920 1 : func (g *generator) iterNextPrefix(iterID objID) {
921 1 : g.add(&iterNextPrefixOp{
922 1 : iterID: iterID,
923 1 : derivedReaderID: g.iterReaderID[iterID],
924 1 : })
925 1 : }
926 :
927 1 : func (g *generator) iterCanSingleDelete(iterID objID) {
928 1 : g.add(&iterCanSingleDelOp{
929 1 : iterID: iterID,
930 1 : derivedReaderID: g.iterReaderID[iterID],
931 1 : })
932 1 : }
933 :
934 1 : func (g *generator) iterPrevWithLimit(iterID objID) {
935 1 : g.add(&iterPrevOp{
936 1 : iterID: iterID,
937 1 : limit: g.keyGenerator.RandKey(0.001), // 0.1% new keys
938 1 : derivedReaderID: g.iterReaderID[iterID],
939 1 : })
940 1 : }
941 :
942 1 : func (g *generator) readerGet() {
943 1 : if len(g.liveReaders) == 0 {
944 0 : return
945 0 : }
946 :
947 1 : readerID := g.liveReaders.rand(g.rng)
948 1 :
949 1 : // If the chosen reader is a snapshot created with user-specified key
950 1 : // ranges, restrict the read to fall within one of the provided key ranges.
951 1 : var key []byte
952 1 : if bounds := g.snapshotBounds[readerID]; len(bounds) > 0 {
953 1 : kr := bounds[g.rng.IntN(len(bounds))]
954 1 : key = g.keyGenerator.RandKeyInRange(0.001, kr) // 0.1% new keys
955 1 : } else {
956 1 : key = g.keyGenerator.RandKey(0.001) // 0.1% new keys
957 1 : }
958 1 : derivedDBID := objID(0)
959 1 : if readerID.tag() == batchTag || readerID.tag() == snapTag {
960 1 : derivedDBID = g.deriveDB(readerID)
961 1 : }
962 1 : g.add(&getOp{readerID: readerID, key: key, derivedDBID: derivedDBID})
963 : }
964 :
965 1 : func (g *generator) replicate() {
966 1 : if len(g.dbs) < 2 {
967 0 : return
968 0 : }
969 :
970 1 : source := g.dbs.rand(g.rng)
971 1 : dest := source
972 1 : for dest == source {
973 1 : dest = g.dbs.rand(g.rng)
974 1 : }
975 :
976 1 : startKey, endKey := g.prefixKeyRange()
977 1 : g.add(&replicateOp{
978 1 : source: source,
979 1 : dest: dest,
980 1 : start: startKey,
981 1 : end: endKey,
982 1 : })
983 : }
984 :
985 : // generateDisjointKeyRanges generates n disjoint key ranges.
986 1 : func (g *generator) generateDisjointKeyRanges(n int) []pebble.KeyRange {
987 1 : keys := g.keyGenerator.UniqueKeys(2*n, func() []byte { return g.keyGenerator.RandPrefix(0.1) })
988 1 : keyRanges := make([]pebble.KeyRange, n)
989 1 : for i := range keyRanges {
990 1 : keyRanges[i] = pebble.KeyRange{
991 1 : Start: keys[i*2],
992 1 : End: keys[i*2+1],
993 1 : }
994 1 : }
995 1 : return keyRanges
996 : }
997 :
998 1 : func (g *generator) newSnapshot() {
999 1 : snapID := makeObjID(snapTag, g.init.snapshotSlots)
1000 1 : g.init.snapshotSlots++
1001 1 : g.liveSnapshots = append(g.liveSnapshots, snapID)
1002 1 : g.liveReaders = append(g.liveReaders, snapID)
1003 1 : dbID := g.dbs.rand(g.rng)
1004 1 : g.objDB[snapID] = dbID
1005 1 :
1006 1 : iters := make(objIDSet)
1007 1 : g.snapshots[snapID] = iters
1008 1 : g.readers[snapID] = iters
1009 1 :
1010 1 : s := &newSnapshotOp{
1011 1 : dbID: dbID,
1012 1 : snapID: snapID,
1013 1 : }
1014 1 :
1015 1 : // Impose bounds on the keys that may be read with the snapshot. Setting bounds
1016 1 : // allows some runs of the metamorphic test to use a EventuallyFileOnlySnapshot
1017 1 : // instead of a Snapshot, testing equivalence between the two for reads within
1018 1 : // those bounds.
1019 1 : s.bounds = g.generateDisjointKeyRanges(
1020 1 : 1 + g.expRandInt(3),
1021 1 : )
1022 1 : g.snapshotBounds[snapID] = s.bounds
1023 1 : g.add(s)
1024 1 : }
1025 :
1026 1 : func (g *generator) snapshotClose() {
1027 1 : if len(g.liveSnapshots) == 0 {
1028 1 : return
1029 1 : }
1030 :
1031 1 : snapID := g.liveSnapshots.rand(g.rng)
1032 1 : g.liveSnapshots.remove(snapID)
1033 1 : iters := g.snapshots[snapID]
1034 1 : delete(g.snapshots, snapID)
1035 1 : g.liveReaders.remove(snapID)
1036 1 : delete(g.readers, snapID)
1037 1 :
1038 1 : for _, id := range iters.sorted() {
1039 1 : g.liveIters.remove(id)
1040 1 : delete(g.iters, id)
1041 1 : g.add(&closeOp{objID: id})
1042 1 : }
1043 :
1044 1 : g.add(&closeOp{objID: snapID})
1045 : }
1046 :
1047 1 : func (g *generator) newExternalObj() {
1048 1 : if len(g.liveBatches) == 0 {
1049 1 : return
1050 1 : }
1051 1 : var batchID objID
1052 1 : // Try to find a suitable batch.
1053 1 : for i := 0; ; i++ {
1054 1 : if i == 10 {
1055 1 : return
1056 1 : }
1057 1 : batchID = g.liveBatches.rand(g.rng)
1058 1 : okm := g.keyManager.objKeyMeta(batchID)
1059 1 : if !okm.bounds.IsUnset() {
1060 1 : break
1061 : }
1062 : }
1063 1 : g.removeBatchFromGenerator(batchID)
1064 1 : objID := makeObjID(externalObjTag, g.init.externalObjSlots)
1065 1 : g.init.externalObjSlots++
1066 1 : g.externalObjects = append(g.externalObjects, objID)
1067 1 : g.add(&newExternalObjOp{
1068 1 : batchID: batchID,
1069 1 : externalObjID: objID,
1070 1 : })
1071 : }
1072 :
1073 1 : func (g *generator) writerApply() {
1074 1 : if len(g.liveBatches) == 0 {
1075 1 : return
1076 1 : }
1077 1 : if len(g.liveWriters) < 2 {
1078 0 : panic(fmt.Sprintf("insufficient liveWriters (%d) to apply batch", len(g.liveWriters)))
1079 : }
1080 :
1081 1 : batchID := g.liveBatches.rand(g.rng)
1082 1 : dbID := g.dbIDForObj(batchID)
1083 1 :
1084 1 : var writerID objID
1085 1 : for {
1086 1 : // NB: The writer we're applying to, as well as the batch we're applying,
1087 1 : // must be from the same DB. The writer could be the db itself. Applying
1088 1 : // a batch from one DB on another DB results in a panic, so avoid that.
1089 1 : writerID = g.liveWriters.rand(g.rng)
1090 1 : writerDBID := writerID
1091 1 : if writerID.tag() != dbTag {
1092 1 : writerDBID = g.dbIDForObj(writerID)
1093 1 : }
1094 1 : if writerID != batchID && writerDBID == dbID {
1095 1 : break
1096 : }
1097 : }
1098 :
1099 : // The batch we're applying may contain single delete tombstones that when
1100 : // applied to the writer result in nondeterminism in the deleted key. If
1101 : // that's the case, we can restore determinism by first deleting the key
1102 : // from the writer.
1103 : //
1104 : // Generating additional operations here is not ideal, but it simplifies
1105 : // single delete invariants significantly.
1106 1 : singleDeleteConflicts := g.keyManager.checkForSingleDelConflicts(batchID, writerID, false /* collapsed */)
1107 1 : for _, conflict := range singleDeleteConflicts {
1108 0 : g.add(&deleteOp{
1109 0 : writerID: writerID,
1110 0 : key: conflict,
1111 0 : derivedDBID: dbID,
1112 0 : })
1113 0 : }
1114 :
1115 1 : g.removeBatchFromGenerator(batchID)
1116 1 :
1117 1 : g.add(&applyOp{
1118 1 : writerID: writerID,
1119 1 : batchID: batchID,
1120 1 : })
1121 1 : g.add(&closeOp{
1122 1 : objID: batchID,
1123 1 : })
1124 : }
1125 :
1126 1 : func (g *generator) writerDelete() {
1127 1 : if len(g.liveWriters) == 0 {
1128 0 : return
1129 0 : }
1130 :
1131 1 : writerID := g.liveWriters.rand(g.rng)
1132 1 : derivedDBID := writerID
1133 1 : if derivedDBID.tag() != dbTag {
1134 1 : derivedDBID = g.dbIDForObj(writerID)
1135 1 : }
1136 1 : g.add(&deleteOp{
1137 1 : writerID: writerID,
1138 1 : key: g.keyGenerator.RandKey(0.001), // 0.1% new keys
1139 1 : derivedDBID: derivedDBID,
1140 1 : })
1141 : }
1142 :
1143 1 : func (g *generator) writerDeleteRange() {
1144 1 : if len(g.liveWriters) == 0 {
1145 0 : return
1146 0 : }
1147 :
1148 1 : keys := g.keyGenerator.UniqueKeys(2, func() []byte { return g.keyGenerator.RandKey(0.001) })
1149 1 : start, end := keys[0], keys[1]
1150 1 :
1151 1 : writerID := g.liveWriters.rand(g.rng)
1152 1 : g.add(&deleteRangeOp{
1153 1 : writerID: writerID,
1154 1 : start: start,
1155 1 : end: end,
1156 1 : })
1157 : }
1158 :
1159 1 : func (g *generator) writerRangeKeyDelete() {
1160 1 : if len(g.liveWriters) == 0 {
1161 0 : return
1162 0 : }
1163 1 : start, end := g.prefixKeyRange()
1164 1 :
1165 1 : writerID := g.liveWriters.rand(g.rng)
1166 1 : g.add(&rangeKeyDeleteOp{
1167 1 : writerID: writerID,
1168 1 : start: start,
1169 1 : end: end,
1170 1 : })
1171 : }
1172 :
1173 1 : func (g *generator) writerRangeKeySet() {
1174 1 : if len(g.liveWriters) == 0 {
1175 0 : return
1176 0 : }
1177 1 : start, end := g.prefixKeyRange()
1178 1 :
1179 1 : // 90% of the time, set a suffix.
1180 1 : var suffix []byte
1181 1 : if g.rng.Float64() < 0.90 {
1182 1 : // Increase the max suffix 5% of the time.
1183 1 : suffix = g.keyGenerator.SkewedSuffix(0.05)
1184 1 : }
1185 :
1186 1 : writerID := g.liveWriters.rand(g.rng)
1187 1 : g.add(&rangeKeySetOp{
1188 1 : writerID: writerID,
1189 1 : start: start,
1190 1 : end: end,
1191 1 : suffix: suffix,
1192 1 : value: randBytes(g.rng, 0, maxValueSize),
1193 1 : })
1194 : }
1195 :
1196 1 : func (g *generator) writerRangeKeyUnset() {
1197 1 : if len(g.liveWriters) == 0 {
1198 0 : return
1199 0 : }
1200 1 : start, end := g.prefixKeyRange()
1201 1 :
1202 1 : // 90% of the time, set a suffix.
1203 1 : var suffix []byte
1204 1 : if g.rng.Float64() < 0.90 {
1205 1 : // Increase the max suffix 5% of the time.
1206 1 : suffix = g.keyGenerator.SkewedSuffix(0.05)
1207 1 : }
1208 :
1209 : // TODO(jackson): Increase probability of effective unsets? Purely random
1210 : // unsets are unlikely to remove an active range key.
1211 :
1212 1 : writerID := g.liveWriters.rand(g.rng)
1213 1 : g.add(&rangeKeyUnsetOp{
1214 1 : writerID: writerID,
1215 1 : start: start,
1216 1 : end: end,
1217 1 : suffix: suffix,
1218 1 : })
1219 : }
1220 :
1221 1 : func (g *generator) writerIngest() {
1222 1 : if len(g.liveBatches) == 0 {
1223 1 : return
1224 1 : }
1225 :
1226 1 : dbID := g.dbs.rand(g.rng)
1227 1 : n := min(1+g.expRandInt(1), len(g.liveBatches))
1228 1 : batchIDs := make([]objID, n)
1229 1 : derivedDBIDs := make([]objID, n)
1230 1 : for i := 0; i < n; i++ {
1231 1 : batchID := g.liveBatches.rand(g.rng)
1232 1 : batchIDs[i] = batchID
1233 1 : derivedDBIDs[i] = g.dbIDForObj(batchID)
1234 1 : g.removeBatchFromGenerator(batchID)
1235 1 : }
1236 :
1237 : // Ingestions may fail if the ingested sstables overlap one another.
1238 : // Either an ingestion succeeds and its keys are committed to the DB, or it
1239 : // fails and the keys are not committed.
1240 1 : if !g.keyManager.doObjectBoundsOverlap(batchIDs) {
1241 1 : // This ingestion will succeed.
1242 1 : //
1243 1 : // The batches we're ingesting may contain single delete tombstones that
1244 1 : // when applied to the writer result in nondeterminism in the deleted key.
1245 1 : // If that's the case, we can restore determinism by first deleting the keys
1246 1 : // from the writer.
1247 1 : //
1248 1 : // Generating additional operations here is not ideal, but it simplifies
1249 1 : // single delete invariants significantly.
1250 1 : for _, batchID := range batchIDs {
1251 1 : singleDeleteConflicts := g.keyManager.checkForSingleDelConflicts(batchID, dbID, true /* collapsed */)
1252 1 : for _, conflict := range singleDeleteConflicts {
1253 1 : g.add(&deleteOp{
1254 1 : writerID: dbID,
1255 1 : key: conflict,
1256 1 : derivedDBID: dbID,
1257 1 : })
1258 1 : }
1259 : }
1260 : }
1261 1 : g.add(&ingestOp{
1262 1 : dbID: dbID,
1263 1 : batchIDs: batchIDs,
1264 1 : derivedDBIDs: derivedDBIDs,
1265 1 : })
1266 : }
1267 :
1268 1 : func (g *generator) writerIngestAndExcise() {
1269 1 : if len(g.liveBatches) == 0 {
1270 1 : return
1271 1 : }
1272 :
1273 1 : dbID := g.dbs.rand(g.rng)
1274 1 : batchID := g.liveBatches.rand(g.rng)
1275 1 : g.removeBatchFromGenerator(batchID)
1276 1 :
1277 1 : start, end := g.prefixKeyRange()
1278 1 : derivedDBID := g.dbIDForObj(batchID)
1279 1 :
1280 1 : // Check for any single delete conflicts. If this batch is single-deleting
1281 1 : // a key that isn't safe to single delete in the underlying db, _and_ this
1282 1 : // key is not in the excise span, we add a delete before the ingestAndExcise.
1283 1 : singleDeleteConflicts := g.keyManager.checkForSingleDelConflicts(batchID, dbID, true /* collapsed */)
1284 1 : for _, conflict := range singleDeleteConflicts {
1285 0 : if g.cmp(conflict, start) >= 0 && g.cmp(conflict, end) < 0 {
1286 0 : // This key will get excised anyway.
1287 0 : continue
1288 : }
1289 0 : g.add(&deleteOp{
1290 0 : writerID: dbID,
1291 0 : key: conflict,
1292 0 : derivedDBID: dbID,
1293 0 : })
1294 : }
1295 :
1296 1 : g.add(&ingestAndExciseOp{
1297 1 : dbID: dbID,
1298 1 : batchID: batchID,
1299 1 : derivedDBID: derivedDBID,
1300 1 : exciseStart: start,
1301 1 : exciseEnd: end,
1302 1 : })
1303 : }
1304 :
1305 1 : func (g *generator) writerIngestExternalFiles() {
1306 1 : if len(g.externalObjects) == 0 {
1307 1 : return
1308 1 : }
1309 1 : dbID := g.dbs.rand(g.rng)
1310 1 : numFiles := 1 + g.expRandInt(1)
1311 1 : objs := make([]externalObjWithBounds, numFiles)
1312 1 :
1313 1 : // We generate the parameters in multiple passes:
1314 1 : // 1. Generate objs with random start and end keys. Their bounds can overlap.
1315 1 : // 2. Sort objects by the start bound and trim the bounds to remove overlap.
1316 1 : // 3. Remove any objects where the previous step resulted in empty bounds.
1317 1 : // 4. Randomly add synthetic suffixes.
1318 1 :
1319 1 : for i := range objs {
1320 1 : // We allow the same object to be selected multiple times.
1321 1 : id := g.externalObjects.rand(g.rng)
1322 1 : b := g.keyManager.objKeyMeta(id).bounds
1323 1 :
1324 1 : objStart := g.prefix(b.smallest)
1325 1 : objEnd := g.prefix(b.largest)
1326 1 : if !b.largestExcl || len(objEnd) != len(b.largest) {
1327 1 : // Move up the end key a bit by appending a few letters to the prefix.
1328 1 : objEnd = append(objEnd, randBytes(g.rng, 1, 3)...)
1329 1 : }
1330 1 : if g.cmp(objStart, objEnd) >= 0 {
1331 0 : panic("bug in generating obj bounds")
1332 : }
1333 : // Generate two random keys within the given bounds.
1334 : // First, generate a start key in the range [objStart, objEnd).
1335 1 : start := g.keyGenerator.RandKeyInRange(0.01, pebble.KeyRange{
1336 1 : Start: objStart,
1337 1 : End: objEnd,
1338 1 : })
1339 1 : start = g.prefix(start)
1340 1 : // Second, generate an end key in the range (start, objEnd]. To do this, we
1341 1 : // generate a key in the range [start, objEnd) and if we get `start`, we
1342 1 : // remap that to `objEnd`.
1343 1 : end := g.keyGenerator.RandKeyInRange(0.01, pebble.KeyRange{
1344 1 : Start: start,
1345 1 : End: objEnd,
1346 1 : })
1347 1 : end = g.prefix(end)
1348 1 : if g.cmp(start, end) == 0 {
1349 1 : end = objEnd
1350 1 : }
1351 : // Randomly set up synthetic prefix.
1352 1 : var syntheticPrefix sstable.SyntheticPrefix
1353 1 : if g.rng.IntN(2) == 0 {
1354 1 : syntheticPrefix = randBytes(g.rng, 1, 5)
1355 1 : start = syntheticPrefix.Apply(start)
1356 1 : end = syntheticPrefix.Apply(end)
1357 1 : }
1358 :
1359 1 : objs[i] = externalObjWithBounds{
1360 1 : externalObjID: id,
1361 1 : bounds: pebble.KeyRange{
1362 1 : Start: start,
1363 1 : End: end,
1364 1 : },
1365 1 : syntheticPrefix: syntheticPrefix,
1366 1 : }
1367 : }
1368 :
1369 : // Sort by start bound.
1370 1 : slices.SortFunc(objs, func(a, b externalObjWithBounds) int {
1371 1 : return g.cmp(a.bounds.Start, b.bounds.Start)
1372 1 : })
1373 :
1374 : // Trim bounds so that there is no overlap.
1375 1 : for i := 0; i < len(objs)-1; i++ {
1376 1 : if g.cmp(objs[i].bounds.End, objs[i+1].bounds.Start) > 0 {
1377 1 : objs[i].bounds.End = objs[i+1].bounds.Start
1378 1 : }
1379 : }
1380 : // Some bounds might be empty now, remove those objects altogether. Note that
1381 : // the last object is unmodified, so at least that object will remain.
1382 1 : objs = slices.DeleteFunc(objs, func(o externalObjWithBounds) bool {
1383 1 : return g.cmp(o.bounds.Start, o.bounds.End) >= 0
1384 1 : })
1385 :
1386 : // Randomly set synthetic suffixes.
1387 1 : for i := range objs {
1388 1 : if g.rng.IntN(2) == 0 {
1389 1 : // We can only use a synthetic suffix if we don't have range dels or RangeKeyUnsets.
1390 1 : if meta := g.keyManager.objKeyMeta(objs[i].externalObjID); meta.hasRangeDels || meta.hasRangeKeyUnset {
1391 1 : continue
1392 : }
1393 :
1394 : // We can only use a synthetic suffix if we don't have overlapping range
1395 : // key sets (because they will become logically conflicting when we
1396 : // replace their suffixes with the synthetic one).
1397 1 : if g.keyManager.ExternalObjectHasOverlappingRangeKeySets(objs[i].externalObjID) {
1398 0 : continue
1399 : }
1400 :
1401 : // Generate a suffix that sorts before any previously generated suffix.
1402 1 : objs[i].syntheticSuffix = g.keyGenerator.IncMaxSuffix()
1403 : }
1404 : }
1405 :
1406 : // The external objects we're ingesting may contain single delete tombstones
1407 : // that when applied to the db result in nondeterminism in the deleted key. If
1408 : // that's the case, we can restore determinism by first deleting the keys from
1409 : // the db.
1410 : //
1411 : // Generating additional operations here is not ideal, but it simplifies
1412 : // single delete invariants significantly.
1413 1 : dbKeys := g.keyManager.objKeyMeta(dbID)
1414 1 : for _, o := range objs {
1415 1 : for _, src := range g.keyManager.KeysForExternalIngest(o) {
1416 1 : if g.keyManager.checkForSingleDelConflict(src, dbKeys) {
1417 0 : g.add(&deleteOp{
1418 0 : writerID: dbID,
1419 0 : key: src.key,
1420 0 : derivedDBID: dbID,
1421 0 : })
1422 0 : }
1423 : }
1424 : }
1425 :
1426 : // Shuffle the objects.
1427 1 : g.rng.Shuffle(len(objs), func(i, j int) {
1428 1 : objs[i], objs[j] = objs[j], objs[i]
1429 1 : })
1430 :
1431 1 : g.add(&ingestExternalFilesOp{
1432 1 : dbID: dbID,
1433 1 : objs: objs,
1434 1 : })
1435 : }
1436 :
1437 1 : func (g *generator) writerLogData() {
1438 1 : if len(g.liveWriters) == 0 {
1439 0 : return
1440 0 : }
1441 1 : g.add(&logDataOp{
1442 1 : writerID: g.liveWriters.rand(g.rng),
1443 1 : data: randBytes(g.rng, 0, g.expRandInt(10)),
1444 1 : })
1445 : }
1446 :
1447 1 : func (g *generator) writerMerge() {
1448 1 : if len(g.liveWriters) == 0 {
1449 0 : return
1450 0 : }
1451 :
1452 1 : writerID := g.liveWriters.rand(g.rng)
1453 1 : g.add(&mergeOp{
1454 1 : writerID: writerID,
1455 1 : // 20% new keys.
1456 1 : key: g.keyGenerator.RandKey(0.2),
1457 1 : value: randBytes(g.rng, 0, maxValueSize),
1458 1 : })
1459 : }
1460 :
1461 1 : func (g *generator) writerSet() {
1462 1 : if len(g.liveWriters) == 0 {
1463 0 : return
1464 0 : }
1465 :
1466 1 : writerID := g.liveWriters.rand(g.rng)
1467 1 : g.add(&setOp{
1468 1 : writerID: writerID,
1469 1 : // 50% new keys.
1470 1 : key: g.keyGenerator.RandKey(0.5),
1471 1 : value: randBytes(g.rng, 0, maxValueSize),
1472 1 : })
1473 : }
1474 :
1475 1 : func (g *generator) writerSingleDelete() {
1476 1 : if len(g.liveWriters) == 0 {
1477 0 : return
1478 0 : }
1479 :
1480 1 : writerID := g.liveWriters.rand(g.rng)
1481 1 : key := g.randKeyToSingleDelete(writerID)
1482 1 : if key == nil {
1483 1 : return
1484 1 : }
1485 1 : g.add(&singleDeleteOp{
1486 1 : writerID: writerID,
1487 1 : key: key,
1488 1 : // Keys eligible for single deletes can be removed with a regular
1489 1 : // delete. Mutate a percentage of SINGLEDEL ops into DELETEs. Note that
1490 1 : // here we are only determining whether the replacement *could* happen.
1491 1 : // At test runtime, the `replaceSingleDelete` test option must also be
1492 1 : // set to true for the single delete to be replaced.
1493 1 : maybeReplaceDelete: g.rng.Float64() < 0.25,
1494 1 : })
1495 : }
1496 :
1497 1 : func (g *generator) maybeMutateOptions(readerID objID, opts *iterOpts) {
1498 1 : // With 95% probability, allow changes to any options at all. This ensures
1499 1 : // that in 5% of cases there are no changes, and SetOptions hits its fast
1500 1 : // path.
1501 1 : if g.rng.IntN(100) >= 5 {
1502 1 : if !g.maybeSetSnapshotIterBounds(readerID, opts) {
1503 1 : // With 1/3 probability, clear existing bounds.
1504 1 : if opts.lower != nil && g.rng.IntN(3) == 0 {
1505 1 : opts.lower = nil
1506 1 : }
1507 1 : if opts.upper != nil && g.rng.IntN(3) == 0 {
1508 1 : opts.upper = nil
1509 1 : }
1510 : // With 1/3 probability, update the bounds.
1511 1 : if g.rng.IntN(3) == 0 {
1512 1 : // Generate a new key with a .1% probability.
1513 1 : opts.lower = g.keyGenerator.RandKey(0.001)
1514 1 : }
1515 1 : if g.rng.IntN(3) == 0 {
1516 1 : // Generate a new key with a .1% probability.
1517 1 : opts.upper = g.keyGenerator.RandKey(0.001)
1518 1 : }
1519 1 : if g.cmp(opts.lower, opts.upper) > 0 {
1520 1 : opts.lower, opts.upper = opts.upper, opts.lower
1521 1 : }
1522 : }
1523 :
1524 : // With 1/3 probability, update the key-types/mask.
1525 1 : if g.rng.IntN(3) == 0 {
1526 1 : opts.keyTypes, opts.maskSuffix = g.randKeyTypesAndMask()
1527 1 : }
1528 :
1529 : // With 1/3 probability, clear existing filter.
1530 1 : if opts.filterMax > 0 && g.rng.IntN(3) == 0 {
1531 1 : opts.filterMax, opts.filterMin = 0, 0
1532 1 : }
1533 : // With 10% probability, set a filter range.
1534 1 : if g.rng.IntN(10) == 1 {
1535 1 : opts.filterMin = uint64(g.keyGenerator.UniformSuffixInt() + 1)
1536 1 : opts.filterMax = uint64(g.keyGenerator.UniformSuffixInt() + 1)
1537 1 : if opts.filterMin > opts.filterMax {
1538 1 : opts.filterMin, opts.filterMax = opts.filterMax, opts.filterMin
1539 1 : } else if opts.filterMin == opts.filterMax {
1540 1 : opts.filterMax = opts.filterMin + 1
1541 1 : }
1542 : }
1543 : // With 10% probability, flip enablement of L6 filters.
1544 1 : if g.rng.Float64() <= 0.1 {
1545 1 : opts.useL6Filters = !opts.useL6Filters
1546 1 : }
1547 : }
1548 : }
1549 :
1550 1 : func (g *generator) cmp(a, b []byte) int {
1551 1 : return g.keyManager.comparer.Compare(a, b)
1552 1 : }
1553 :
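// prefix returns the prefix of a. The full slice expression a[:n:n] clamps
// the result's capacity so that a subsequent append cannot overwrite a's
// suffix bytes.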
1554 1 : func (g *generator) prefix(a []byte) []byte {
1555 1 : n := g.keyManager.comparer.Split(a)
1556 1 : return a[:n:n]
1557 1 : }
1558 :
1559 0 : func (g *generator) String() string {
1560 0 : var buf bytes.Buffer
1561 0 : for _, op := range g.ops {
1562 0 : fmt.Fprintf(&buf, "%s\n", op)
1563 0 : }
1564 0 : return buf.String()
1565 : }
1566 :
1567 : // expRandInt returns a random non-negative integer using the exponential
1568 : // distribution with the given mean. This is useful when we usually want to test
1569 : // with small values, but we want to occasionally test with a larger value.
1570 : //
1571 : // Large integers are exponentially less likely than small integers;
1572 : // specifically, the probability decreases by a factor of `e` every `mean`
1573 : // values.
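//
// For example, with mean = 1, the result is 0 with probability
// 1-e^(-0.5) ≈ 0.39 and is at least 5 with probability e^(-4.5) ≈ 0.01.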
1574 1 : func (g *generator) expRandInt(mean int) int {
1575 1 : return int(math.Round(g.rng.ExpFloat64() * float64(mean)))
1576 1 : }