Line data Source code
1 : // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package metamorphic
6 :
7 : import (
8 : "bytes"
9 : "fmt"
10 : "math"
11 : "os"
12 : "slices"
13 :
14 : "github.com/cockroachdb/pebble"
15 : "github.com/cockroachdb/pebble/internal/randvar"
16 : "github.com/cockroachdb/pebble/sstable"
17 : "golang.org/x/exp/rand"
18 : )
19 :
20 : const maxValueSize = 20
21 :
22 : type iterOpts struct {
23 : lower []byte
24 : upper []byte
25 : keyTypes uint32 // pebble.IterKeyType
26 : // maskSuffix may be set if keyTypes is IterKeyTypePointsAndRanges to
27 : // configure IterOptions.RangeKeyMasking.Suffix.
28 : maskSuffix []byte
29 :
30 : // If filterMax is >0, this iterator will filter out any keys whose
31 : // suffixes don't fall within the range [filterMin,filterMax).
32 : // Additionally, the iterator will be constructed with a block-property
33 : // filter that filters out blocks accordingly. Not all OPTIONS hook up the
34 : // corresponding block property collector, so block-filtering may still be
35 : // effectively disabled in some runs. The iterator operations themselves,
36 : // however, will always skip past any points that should be filtered, to
37 : // ensure determinism. (A sketch of this predicate follows the struct.)
38 : filterMin uint64
39 : filterMax uint64
40 :
41 : // See IterOptions.UseL6Filters.
42 : useL6Filters bool
43 :
44 : // NB: If adding or removing fields, ensure IsZero is in sync.
45 : }
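// A minimal sketch of the filtering predicate implied by filterMin/filterMax,
// assuming key suffixes decode to uint64 values; the helper name
// suffixInFilterRange is hypothetical:
//
//	func (o iterOpts) suffixInFilterRange(suffix uint64) bool {
//		if o.filterMax == 0 {
//			return true // filtering disabled
//		}
//		// Half-open interval: keep suffixes in [filterMin, filterMax).
//		return suffix >= o.filterMin && suffix < o.filterMax
//	}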
46 :
47 2 : func (o iterOpts) IsZero() bool {
48 2 : return o.lower == nil && o.upper == nil && o.keyTypes == 0 &&
49 2 : o.maskSuffix == nil && o.filterMin == 0 && o.filterMax == 0 && !o.useL6Filters
50 2 : }
51 :
52 : // GenerateOps generates n random operations, drawing randomness from the
53 : // provided pseudorandom generator and using cfg to determine the distribution
54 : // of op types.
55 1 : func GenerateOps(rng *rand.Rand, n uint64, cfg OpConfig) Ops {
56 1 : // Generate a new set of random ops. Callers typically write these to
57 1 : // <dir>/ops, to be read by the child processes when performing a test run.
58 1 : return generate(rng, n, cfg, newKeyManager(1 /* num instances */))
59 1 : }
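// A hypothetical usage sketch of GenerateOps; `seed` and `cfg` are assumed to
// be supplied by the caller (cfg built from this package's OpConfig helpers):
//
//	rng := rand.New(rand.NewSource(seed)) // golang.org/x/exp/rand; seed is a uint64
//	ops := GenerateOps(rng, 10000, cfg)   // generate 10,000 random operations
//	fmt.Println(len(ops))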
60 :
61 : type generator struct {
62 : cfg OpConfig
63 : rng *rand.Rand
64 :
65 : init *initOp
66 : ops []op
67 :
68 : // keyManager tracks the state of keys at operation generation time.
69 : keyManager *keyManager
70 : keyGenerator *keyGenerator
71 : dbs objIDSlice
72 : // Unordered sets of object IDs for live objects. Used to randomly select an
73 : // object when generating an operation. There are 4 concrete object types:
74 : // DBs (one per instance), batches, iterators, and snapshots.
75 : //
76 : // liveBatches contains the live indexed and write-only batches.
77 : liveBatches objIDSlice
78 : // liveIters contains the live iterators.
79 : liveIters objIDSlice
80 : itersLastOpts map[objID]iterOpts
81 : // liveReaders contains the DBs, and any live indexed batches and snapshots.
82 : // The DBs always come first in the slice.
83 : liveReaders objIDSlice
84 : // liveSnapshots contains the live snapshots.
85 : liveSnapshots objIDSlice
86 : // liveWriters contains the DBs, and any live batches. The DBs always come first.
87 : liveWriters objIDSlice
88 : // externalObjects contains the external objects created.
89 : externalObjects objIDSlice
90 :
91 : // Maps used to find associated objects during generation. These maps are not
92 : // needed during test execution.
93 : //
94 : // batchID -> batch iters: used to keep track of the open iterators on an
95 : // indexed batch. The iter set value will also be indexed by the readers map.
96 : batches map[objID]objIDSet
97 : // iterID -> reader iters: used to keep track of all of the open
98 : // iterators. The iter set value will also be indexed by either the batches
99 : // or snapshots maps.
100 : iters map[objID]objIDSet
101 : // objectID -> db: used to keep track of the DB a batch, iter, or snapshot
102 : // was created on. It should be read through the dbIDForObj method.
103 : objDB map[objID]objID
104 : // readerID -> reader iters: used to keep track of the open iterators on a
105 : // reader. The iter set value will also be indexed by either the batches or
106 : // snapshots maps. This map is the union of batches and snapshots maps.
107 : readers map[objID]objIDSet
108 : // snapshotID -> snapshot iters: used to keep track of the open iterators on
109 : // a snapshot. The iter set value will also be indexed by the readers map.
110 : snapshots map[objID]objIDSet
111 : // snapshotID -> bounds of the snapshot: only populated for snapshots that
112 : // are constrained by bounds.
113 : snapshotBounds map[objID][]pebble.KeyRange
114 : // iterCreationTimestamp is the metaTimestamp at which the iter was created.
115 : iterCreationTimestamp map[objID]int
116 : // iterReaderID is a map from an iterID to a readerID.
117 : iterReaderID map[objID]objID
118 : }
119 :
120 1 : func newGenerator(rng *rand.Rand, cfg OpConfig, km *keyManager) *generator {
121 1 : keyGenerator := newKeyGenerator(km, rng, cfg)
122 1 : g := &generator{
123 1 : cfg: cfg,
124 1 : rng: rng,
125 1 : init: &initOp{dbSlots: uint32(cfg.numInstances)},
126 1 : keyManager: km,
127 1 : keyGenerator: keyGenerator,
128 1 : liveReaders: objIDSlice{makeObjID(dbTag, 1)},
129 1 : liveWriters: objIDSlice{makeObjID(dbTag, 1)},
130 1 : dbs: objIDSlice{makeObjID(dbTag, 1)},
131 1 : objDB: make(map[objID]objID),
132 1 : batches: make(map[objID]objIDSet),
133 1 : iters: make(map[objID]objIDSet),
134 1 : readers: make(map[objID]objIDSet),
135 1 : snapshots: make(map[objID]objIDSet),
136 1 : snapshotBounds: make(map[objID][]pebble.KeyRange),
137 1 : itersLastOpts: make(map[objID]iterOpts),
138 1 : iterCreationTimestamp: make(map[objID]int),
139 1 : iterReaderID: make(map[objID]objID),
140 1 : }
141 1 : for i := 1; i < cfg.numInstances; i++ {
142 1 : g.liveReaders = append(g.liveReaders, makeObjID(dbTag, uint32(i+1)))
143 1 : g.liveWriters = append(g.liveWriters, makeObjID(dbTag, uint32(i+1)))
144 1 : g.dbs = append(g.dbs, makeObjID(dbTag, uint32(i+1)))
145 1 : }
146 : // Note that the initOp fields are populated during generation.
147 1 : g.ops = append(g.ops, g.init)
148 1 : return g
149 : }
150 :
151 1 : func generate(rng *rand.Rand, count uint64, cfg OpConfig, km *keyManager) []op {
152 1 : g := newGenerator(rng, cfg, km)
153 1 :
154 1 : opGenerators := []func(){
155 1 : OpBatchAbort: g.batchAbort,
156 1 : OpBatchCommit: g.batchCommit,
157 1 : OpDBCheckpoint: g.dbCheckpoint,
158 1 : OpDBCompact: g.dbCompact,
159 1 : OpDBDownload: g.dbDownload,
160 1 : OpDBFlush: g.dbFlush,
161 1 : OpDBRatchetFormatMajorVersion: g.dbRatchetFormatMajorVersion,
162 1 : OpDBRestart: g.dbRestart,
163 1 : OpIterClose: g.randIter(g.iterClose),
164 1 : OpIterFirst: g.randIter(g.iterFirst),
165 1 : OpIterLast: g.randIter(g.iterLast),
166 1 : OpIterNext: g.randIter(g.iterNext),
167 1 : OpIterNextWithLimit: g.randIter(g.iterNextWithLimit),
168 1 : OpIterNextPrefix: g.randIter(g.iterNextPrefix),
169 1 : OpIterCanSingleDelete: g.randIter(g.iterCanSingleDelete),
170 1 : OpIterPrev: g.randIter(g.iterPrev),
171 1 : OpIterPrevWithLimit: g.randIter(g.iterPrevWithLimit),
172 1 : OpIterSeekGE: g.randIter(g.iterSeekGE),
173 1 : OpIterSeekGEWithLimit: g.randIter(g.iterSeekGEWithLimit),
174 1 : OpIterSeekLT: g.randIter(g.iterSeekLT),
175 1 : OpIterSeekLTWithLimit: g.randIter(g.iterSeekLTWithLimit),
176 1 : OpIterSeekPrefixGE: g.randIter(g.iterSeekPrefixGE),
177 1 : OpIterSetBounds: g.randIter(g.iterSetBounds),
178 1 : OpIterSetOptions: g.randIter(g.iterSetOptions),
179 1 : OpNewBatch: g.newBatch,
180 1 : OpNewIndexedBatch: g.newIndexedBatch,
181 1 : OpNewIter: g.newIter,
182 1 : OpNewIterUsingClone: g.newIterUsingClone,
183 1 : OpNewSnapshot: g.newSnapshot,
184 1 : OpNewExternalObj: g.newExternalObj,
185 1 : OpReaderGet: g.readerGet,
186 1 : OpReplicate: g.replicate,
187 1 : OpSnapshotClose: g.snapshotClose,
188 1 : OpWriterApply: g.writerApply,
189 1 : OpWriterDelete: g.writerDelete,
190 1 : OpWriterDeleteRange: g.writerDeleteRange,
191 1 : OpWriterIngest: g.writerIngest,
192 1 : OpWriterIngestAndExcise: g.writerIngestAndExcise,
193 1 : OpWriterIngestExternalFiles: g.writerIngestExternalFiles,
194 1 : OpWriterLogData: g.writerLogData,
195 1 : OpWriterMerge: g.writerMerge,
196 1 : OpWriterRangeKeyDelete: g.writerRangeKeyDelete,
197 1 : OpWriterRangeKeySet: g.writerRangeKeySet,
198 1 : OpWriterRangeKeyUnset: g.writerRangeKeyUnset,
199 1 : OpWriterSet: g.writerSet,
200 1 : OpWriterSingleDelete: g.writerSingleDelete,
201 1 : }
202 1 :
203 1 : // TPCC-style deck of cards randomization. Every time the end of the deck is
204 1 : // reached, we shuffle the deck. (A sketch of this scheme follows this function.)
205 1 : deck := randvar.NewDeck(g.rng, cfg.ops[:]...)
206 1 :
207 1 : defer func() {
208 1 : if r := recover(); r != nil {
209 0 : fmt.Fprintln(os.Stderr, formatOps(g.ops))
210 0 : panic(r)
211 : }
212 : }()
213 1 : for i := uint64(0); i < count; i++ {
214 1 : opGenerators[deck.Int()]()
215 1 : }
216 :
217 1 : g.dbClose()
218 1 :
219 1 : computeDerivedFields(g.ops)
220 1 : return g.ops
221 : }
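// For illustration, a self-contained sketch of the deck-of-cards scheme used
// above (the names and integer weights are illustrative assumptions, not the
// actual randvar.Deck implementation). Each op type appears in the deck in
// proportion to its configured weight, and the deck is reshuffled whenever it
// is exhausted, yielding exact long-run frequencies with a randomized order:
//
//	type deck struct {
//		rng   *rand.Rand
//		cards []int // one entry per card; the value is an op-type index
//		next  int   // position of the next card to deal
//	}
//
//	func newDeck(rng *rand.Rand, weights ...int) *deck {
//		d := &deck{rng: rng}
//		for opType, w := range weights {
//			for j := 0; j < w; j++ {
//				d.cards = append(d.cards, opType)
//			}
//		}
//		d.shuffle()
//		return d
//	}
//
//	func (d *deck) shuffle() {
//		d.rng.Shuffle(len(d.cards), func(i, j int) {
//			d.cards[i], d.cards[j] = d.cards[j], d.cards[i]
//		})
//		d.next = 0
//	}
//
//	// Int deals the next card, reshuffling once the deck is exhausted.
//	func (d *deck) Int() int {
//		if d.next == len(d.cards) {
//			d.shuffle()
//		}
//		c := d.cards[d.next]
//		d.next++
//		return c
//	}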
222 :
223 1 : func (g *generator) add(op op) {
224 1 : g.ops = append(g.ops, op)
225 1 : g.keyManager.update(op)
226 1 : }
227 :
228 : // prefixKeyRange generates a [start, end) pair consisting of two prefix keys.
229 1 : func (g *generator) prefixKeyRange() ([]byte, []byte) {
230 1 : keys := g.keyGenerator.UniqueKeys(2, func() []byte { return g.keyGenerator.RandPrefix(0.01) })
231 1 : return keys[0], keys[1]
232 : }
233 :
234 1 : func (g *generator) randKeyToSingleDelete(id objID) []byte {
235 1 : keys := g.keyManager.eligibleSingleDeleteKeys(id)
236 1 : length := len(keys)
237 1 : if length == 0 {
238 0 : return nil
239 0 : }
240 1 : return keys[g.rng.Intn(length)]
241 : }
242 :
243 1 : func resizeBuffer(buf []byte, prefixLen, suffixLen int) []byte {
244 1 : if cap(buf) >= prefixLen+suffixLen {
245 0 : return buf[:prefixLen+suffixLen]
246 0 : }
247 1 : return make([]byte, prefixLen+suffixLen)
248 : }
249 :
250 1 : func (g *generator) newBatch() {
251 1 : batchID := makeObjID(batchTag, g.init.batchSlots)
252 1 : g.init.batchSlots++
253 1 : g.liveBatches = append(g.liveBatches, batchID)
254 1 : g.liveWriters = append(g.liveWriters, batchID)
255 1 : dbID := g.dbs.rand(g.rng)
256 1 : g.objDB[batchID] = dbID
257 1 :
258 1 : g.add(&newBatchOp{
259 1 : dbID: dbID,
260 1 : batchID: batchID,
261 1 : })
262 1 : }
263 :
264 1 : func (g *generator) newIndexedBatch() {
265 1 : batchID := makeObjID(batchTag, g.init.batchSlots)
266 1 : g.init.batchSlots++
267 1 : g.liveBatches = append(g.liveBatches, batchID)
268 1 : g.liveReaders = append(g.liveReaders, batchID)
269 1 : g.liveWriters = append(g.liveWriters, batchID)
270 1 :
271 1 : iters := make(objIDSet)
272 1 : g.batches[batchID] = iters
273 1 : g.readers[batchID] = iters
274 1 : dbID := g.dbs.rand(g.rng)
275 1 : g.objDB[batchID] = dbID
276 1 :
277 1 : g.add(&newIndexedBatchOp{
278 1 : dbID: dbID,
279 1 : batchID: batchID,
280 1 : })
281 1 : }
282 :
283 : // removeBatchFromGenerator will not generate a closeOp for the target batch, as
284 : // not every batch that is removed from the generator should be closed. For
285 : // example, running a closeOp before an ingestOp that contains the closed batch
286 : // will cause an error.
287 1 : func (g *generator) removeBatchFromGenerator(batchID objID) {
288 1 : g.liveBatches.remove(batchID)
289 1 : iters := g.batches[batchID]
290 1 : delete(g.batches, batchID)
291 1 :
292 1 : if iters != nil {
293 1 : g.liveReaders.remove(batchID)
294 1 : delete(g.readers, batchID)
295 1 : }
296 1 : g.liveWriters.remove(batchID)
297 1 : for _, id := range iters.sorted() {
298 1 : g.liveIters.remove(id)
299 1 : delete(g.iters, id)
300 1 : g.add(&closeOp{objID: id})
301 1 : }
302 : }
303 :
304 1 : func (g *generator) batchAbort() {
305 1 : if len(g.liveBatches) == 0 {
306 1 : return
307 1 : }
308 :
309 1 : batchID := g.liveBatches.rand(g.rng)
310 1 : g.removeBatchFromGenerator(batchID)
311 1 :
312 1 : g.add(&closeOp{objID: batchID})
313 : }
314 :
315 1 : func (g *generator) batchCommit() {
316 1 : if len(g.liveBatches) == 0 {
317 1 : return
318 1 : }
319 :
320 1 : batchID := g.liveBatches.rand(g.rng)
321 1 : dbID := g.dbIDForObj(batchID)
322 1 : g.removeBatchFromGenerator(batchID)
323 1 :
324 1 : // The batch we're applying may contain single delete tombstones that when
325 1 : // applied to the writer result in nondeterminism in the deleted key. If
326 1 : // that's the case, we can restore determinism by first deleting the key
327 1 : // from the writer.
328 1 : //
329 1 : // Generating additional operations here is not ideal, but it simplifies
330 1 : // single delete invariants significantly.
331 1 : singleDeleteConflicts := g.keyManager.checkForSingleDelConflicts(batchID, dbID, false /* collapsed */)
332 1 : for _, conflict := range singleDeleteConflicts {
333 0 : g.add(&deleteOp{
334 0 : writerID: dbID,
335 0 : key: conflict,
336 0 : derivedDBID: dbID,
337 0 : })
338 0 : }
339 :
340 1 : g.add(&batchCommitOp{
341 1 : dbID: dbID,
342 1 : batchID: batchID,
343 1 : })
344 1 : g.add(&closeOp{objID: batchID})
345 :
346 : }
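// A worked example (hypothetical key k, values v1/v2) of the nondeterminism
// handled above: suppose the DB already contains SET(k, v1) and the batch
// contains SET(k, v2) followed by SINGLEDEL(k). Depending on whether the two
// SETs have been collapsed by compaction when the SINGLEDEL is applied, the
// tombstone may consume only the newer SET (resurfacing v1) or remove the key
// entirely. Issuing DELETE(k) on the DB first removes v1, making the final
// state identical either way.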
347 :
348 1 : func (g *generator) dbClose() {
349 1 : // Close any live iterators and snapshots, so that we can close the DB
350 1 : // cleanly.
351 1 : for len(g.liveIters) > 0 {
352 1 : g.randIter(g.iterClose)()
353 1 : }
354 1 : for len(g.liveSnapshots) > 0 {
355 1 : g.snapshotClose()
356 1 : }
357 1 : for len(g.liveBatches) > 0 {
358 0 : batchID := g.liveBatches[0]
359 0 : g.removeBatchFromGenerator(batchID)
360 0 : g.add(&closeOp{objID: batchID})
361 0 : }
362 1 : for len(g.dbs) > 0 {
363 1 : db := g.dbs[0]
364 1 : g.dbs = g.dbs[1:]
365 1 : g.add(&closeOp{objID: db})
366 1 : }
367 : }
368 :
369 1 : func (g *generator) dbCheckpoint() {
370 1 : numSpans := g.expRandInt(1)
371 1 : var spans []pebble.CheckpointSpan
372 1 : if numSpans > 0 {
373 1 : spans = make([]pebble.CheckpointSpan, numSpans)
374 1 : }
375 1 : for i := range spans {
376 1 : start := g.keyGenerator.RandKey(0.01)
377 1 : end := g.keyGenerator.RandKey(0.01)
378 1 : if g.cmp(start, end) > 0 {
379 1 : start, end = end, start
380 1 : }
381 1 : spans[i].Start = start
382 1 : spans[i].End = end
383 : }
384 1 : dbID := g.dbs.rand(g.rng)
385 1 : g.add(&checkpointOp{
386 1 : dbID: dbID,
387 1 : spans: spans,
388 1 : })
389 : }
390 :
391 1 : func (g *generator) dbCompact() {
392 1 : // Generate new key(s) with a 1% probability.
393 1 : start := g.keyGenerator.RandKey(0.01)
394 1 : end := g.keyGenerator.RandKey(0.01)
395 1 : if g.cmp(start, end) > 0 {
396 1 : start, end = end, start
397 1 : }
398 1 : dbID := g.dbs.rand(g.rng)
399 1 : g.add(&compactOp{
400 1 : dbID: dbID,
401 1 : start: start,
402 1 : end: end,
403 1 : parallelize: g.rng.Float64() < 0.5,
404 1 : })
405 : }
406 :
407 1 : func (g *generator) dbDownload() {
408 1 : numSpans := 1 + g.expRandInt(1)
409 1 : spans := make([]pebble.DownloadSpan, numSpans)
410 1 : for i := range spans {
411 1 : keys := g.keyGenerator.UniqueKeys(2, func() []byte { return g.keyGenerator.RandKey(0.001) })
412 1 : start, end := keys[0], keys[1]
413 1 : spans[i].StartKey = start
414 1 : spans[i].EndKey = end
415 1 : spans[i].ViaBackingFileDownload = g.rng.Intn(2) == 0
416 : }
417 1 : dbID := g.dbs.rand(g.rng)
418 1 : g.add(&downloadOp{
419 1 : dbID: dbID,
420 1 : spans: spans,
421 1 : })
422 : }
423 :
424 1 : func (g *generator) dbFlush() {
425 1 : g.add(&flushOp{g.dbs.rand(g.rng)})
426 1 : }
427 :
428 1 : func (g *generator) dbRatchetFormatMajorVersion() {
429 1 : // Ratchet to a random format major version between the minimum the
430 1 : // metamorphic tests support and the newest. At runtime, the generated
431 1 : // version may be behind the database's format major version, in which case
432 1 : // RatchetFormatMajorVersion should deterministically error.
433 1 :
434 1 : dbID := g.dbs.rand(g.rng)
435 1 : n := int(newestFormatMajorVersionToTest - minimumFormatMajorVersion)
436 1 : vers := pebble.FormatMajorVersion(g.rng.Intn(n+1)) + minimumFormatMajorVersion
437 1 : g.add(&dbRatchetFormatMajorVersionOp{dbID: dbID, vers: vers})
438 1 : }
439 :
440 1 : func (g *generator) dbRestart() {
441 1 : // Close any live iterators and snapshots, so that we can close the DB
442 1 : // cleanly.
443 1 : dbID := g.dbs.rand(g.rng)
444 1 : for len(g.liveIters) > 0 {
445 1 : g.randIter(g.iterClose)()
446 1 : }
447 1 : for len(g.liveSnapshots) > 0 {
448 1 : g.snapshotClose()
449 1 : }
450 : // Close the batches.
451 1 : for len(g.liveBatches) > 0 {
452 1 : batchID := g.liveBatches[0]
453 1 : g.removeBatchFromGenerator(batchID)
454 1 : g.add(&closeOp{objID: batchID})
455 1 : }
456 1 : if len(g.liveReaders) != len(g.dbs) || len(g.liveWriters) != len(g.dbs) {
457 0 : panic(fmt.Sprintf("unexpected counts: liveReaders %d, liveWriters: %d",
458 0 : len(g.liveReaders), len(g.liveWriters)))
459 : }
460 1 : g.add(&dbRestartOp{dbID: dbID})
461 : }
462 :
463 : // maybeSetSnapshotIterBounds must be called whenever creating a new iterator or
464 : // modifying the bounds of an iterator. If the iterator is backed by a snapshot
465 : // that only guarantees consistency within a limited set of key spans, then the
466 : // iterator must set bounds within one of the snapshot's consistent key spans. It
467 : // returns true if the provided readerID is a bounded snapshot and bounds were
468 : // set.
469 1 : func (g *generator) maybeSetSnapshotIterBounds(readerID objID, opts *iterOpts) bool {
470 1 : snapBounds, isBoundedSnapshot := g.snapshotBounds[readerID]
471 1 : if !isBoundedSnapshot {
472 1 : return false
473 1 : }
474 : // Pick random bounds within one of the snapshot's key ranges.
475 1 : parentBounds := pickOneUniform(g.rng, snapBounds)
476 1 : // With 10% probability, use the parent start bound as-is.
477 1 : if g.rng.Float64() <= 0.1 {
478 1 : opts.lower = parentBounds.Start
479 1 : } else {
480 1 : opts.lower = g.keyGenerator.RandKeyInRange(0.1, parentBounds)
481 1 : }
482 : // With 10% probability, use the parent end bound as-is.
483 1 : if g.rng.Float64() <= 0.1 {
484 1 : opts.upper = parentBounds.End
485 1 : } else {
486 1 : opts.upper = g.keyGenerator.RandKeyInRange(0.1, pebble.KeyRange{
487 1 : Start: opts.lower,
488 1 : End: parentBounds.End,
489 1 : })
490 1 : }
491 1 : return true
492 : }
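// A worked example with hypothetical bounds: if the snapshot guarantees
// consistency only within [b, f) and [m, q), maybeSetSnapshotIterBounds picks
// one of those ranges uniformly, say [m, q), and then derives iterator bounds
// inside it, e.g. lower = n, upper = p. The iterator consequently never
// observes keys outside a span for which the snapshot's view is consistent.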
493 :
494 1 : func (g *generator) newIter() {
495 1 : iterID := makeObjID(iterTag, g.init.iterSlots)
496 1 : g.init.iterSlots++
497 1 : g.liveIters = append(g.liveIters, iterID)
498 1 :
499 1 : readerID := g.liveReaders.rand(g.rng)
500 1 : if iters := g.readers[readerID]; iters != nil {
501 1 : iters[iterID] = struct{}{}
502 1 : g.iters[iterID] = iters
503 1 : //lint:ignore SA9003 - readability
504 1 : } else {
505 1 : // NB: the DB object does not track its open iterators because it never
506 1 : // closes.
507 1 : }
508 1 : g.iterReaderID[iterID] = readerID
509 1 : dbID := g.deriveDB(iterID)
510 1 :
511 1 : var opts iterOpts
512 1 : if !g.maybeSetSnapshotIterBounds(readerID, &opts) {
513 1 : // Generate lower/upper bounds with a 10% probability.
514 1 : if g.rng.Float64() <= 0.1 {
515 1 : // Generate a new key with a .1% probability.
516 1 : opts.lower = g.keyGenerator.RandKey(0.001)
517 1 : }
518 1 : if g.rng.Float64() <= 0.1 {
519 1 : // Generate a new key with a .1% probability.
520 1 : opts.upper = g.keyGenerator.RandKey(0.001)
521 1 : }
522 1 : if g.cmp(opts.lower, opts.upper) > 0 {
523 1 : opts.lower, opts.upper = opts.upper, opts.lower
524 1 : }
525 : }
526 1 : opts.keyTypes, opts.maskSuffix = g.randKeyTypesAndMask()
527 1 :
528 1 : // With 10% probability, enable automatic filtering of keys with suffixes
529 1 : // not in the provided range. This filtering occurs both through
530 1 : // block-property filtering and explicitly within the iterator operations to
531 1 : // ensure determinism.
532 1 : if g.rng.Float64() <= 0.1 {
533 1 : opts.filterMin = uint64(g.keyGenerator.UniformSuffixInt() + 1)
534 1 : opts.filterMax = uint64(g.keyGenerator.UniformSuffixInt() + 1)
535 1 : if opts.filterMin > opts.filterMax {
536 1 : opts.filterMin, opts.filterMax = opts.filterMax, opts.filterMin
537 1 : } else if opts.filterMin == opts.filterMax {
538 1 : opts.filterMax++
539 1 : }
540 : }
541 :
542 : // Enable L6 filters with a 10% probability.
543 1 : if g.rng.Float64() <= 0.1 {
544 1 : opts.useL6Filters = true
545 1 : }
546 :
547 1 : g.itersLastOpts[iterID] = opts
548 1 : g.iterCreationTimestamp[iterID] = g.keyManager.nextMetaTimestamp()
549 1 : g.iterReaderID[iterID] = readerID
550 1 : g.add(&newIterOp{
551 1 : readerID: readerID,
552 1 : iterID: iterID,
553 1 : iterOpts: opts,
554 1 : derivedDBID: dbID,
555 1 : })
556 : }
557 :
558 1 : func (g *generator) randKeyTypesAndMask() (keyTypes uint32, maskSuffix []byte) {
559 1 : // Iterate over different key types.
560 1 : p := g.rng.Float64()
561 1 : switch {
562 1 : case p < 0.2: // 20% probability
563 1 : keyTypes = uint32(pebble.IterKeyTypePointsOnly)
564 1 : case p < 0.8: // 60% probability
565 1 : keyTypes = uint32(pebble.IterKeyTypePointsAndRanges)
566 1 : // With 50% probability, enable masking.
567 1 : if g.rng.Intn(2) == 1 {
568 1 : maskSuffix = g.keyGenerator.UniformSuffix()
569 1 : }
570 1 : default: // 20% probability
571 1 : keyTypes = uint32(pebble.IterKeyTypeRangesOnly)
572 : }
573 1 : return keyTypes, maskSuffix
574 : }
575 :
576 1 : func (g *generator) deriveDB(readerID objID) objID {
577 1 : dbParentID := readerID
578 1 : if readerID.tag() == iterTag {
579 1 : dbParentID = g.iterReaderID[readerID]
580 1 : }
581 1 : if dbParentID.tag() != dbTag {
582 1 : dbParentID = g.dbIDForObj(dbParentID)
583 1 : }
584 1 : g.objDB[readerID] = dbParentID
585 1 : return dbParentID
586 : }
587 :
588 1 : func (g *generator) dbIDForObj(objID objID) objID {
589 1 : if g.objDB[objID] == 0 {
590 0 : panic(fmt.Sprintf("object %s has no associated DB", objID))
591 : }
592 1 : return g.objDB[objID]
593 : }
594 :
595 1 : func (g *generator) newIterUsingClone() {
596 1 : if len(g.liveIters) == 0 {
597 1 : return
598 1 : }
599 1 : existingIterID := g.liveIters.rand(g.rng)
600 1 : iterID := makeObjID(iterTag, g.init.iterSlots)
601 1 : g.init.iterSlots++
602 1 : g.liveIters = append(g.liveIters, iterID)
603 1 : if iters := g.iters[existingIterID]; iters != nil {
604 1 : iters[iterID] = struct{}{}
605 1 : g.iters[iterID] = iters
606 1 : //lint:ignore SA9003 - readability
607 1 : } else {
608 1 : // NB: the DB object does not track its open iterators because it never
609 1 : // closes.
610 1 : }
611 1 : readerID := g.iterReaderID[existingIterID]
612 1 : g.iterReaderID[iterID] = readerID
613 1 : g.deriveDB(iterID)
614 1 :
615 1 : var refreshBatch bool
616 1 : if readerID.tag() == batchTag {
617 0 : refreshBatch = g.rng.Intn(2) == 1
618 0 : }
619 :
620 1 : opts := g.itersLastOpts[existingIterID]
621 1 : // With 50% probability, consider modifying the iterator options used by the
622 1 : // clone.
623 1 : if g.rng.Intn(2) == 1 {
624 1 : g.maybeMutateOptions(readerID, &opts)
625 1 : }
626 1 : g.itersLastOpts[iterID] = opts
627 1 :
628 1 : g.iterCreationTimestamp[iterID] = g.keyManager.nextMetaTimestamp()
629 1 : g.iterReaderID[iterID] = g.iterReaderID[existingIterID]
630 1 : g.add(&newIterUsingCloneOp{
631 1 : existingIterID: existingIterID,
632 1 : iterID: iterID,
633 1 : refreshBatch: refreshBatch,
634 1 : iterOpts: opts,
635 1 : derivedReaderID: readerID,
636 1 : })
637 : }
638 :
639 1 : func (g *generator) iterClose(iterID objID) {
640 1 : g.liveIters.remove(iterID)
641 1 : if readerIters, ok := g.iters[iterID]; ok {
642 1 : delete(g.iters, iterID)
643 1 : delete(readerIters, iterID)
644 1 : }
645 :
646 1 : g.add(&closeOp{objID: iterID})
647 : }
648 :
649 1 : func (g *generator) iterSetBounds(iterID objID) {
650 1 : iterLastOpts := g.itersLastOpts[iterID]
651 1 : newOpts := iterLastOpts
652 1 : // TODO(jackson): The logic to increase the probability of advancing bounds
653 1 : // monotonically only applies if the snapshot is not bounded. Refactor to
654 1 : // allow bounded snapshots to benefit too, when possible.
655 1 : if !g.maybeSetSnapshotIterBounds(g.iterReaderID[iterID], &newOpts) {
656 1 : var lower, upper []byte
657 1 : genLower := g.rng.Float64() <= 0.9
658 1 : genUpper := g.rng.Float64() <= 0.9
659 1 : // When one of ensureLowerGE, ensureUpperLE is true, the new bounds
660 1 : // don't overlap with the previous bounds.
661 1 : var ensureLowerGE, ensureUpperLE bool
662 1 : if genLower && iterLastOpts.upper != nil && g.rng.Float64() <= 0.9 {
663 1 : ensureLowerGE = true
664 1 : }
665 1 : if (!ensureLowerGE || g.rng.Float64() < 0.5) && genUpper && iterLastOpts.lower != nil {
666 1 : ensureUpperLE = true
667 1 : ensureLowerGE = false
668 1 : }
669 1 : attempts := 0
670 1 : for {
671 1 : attempts++
672 1 : if genLower {
673 1 : // Generate a new key with a .1% probability.
674 1 : lower = g.keyGenerator.RandKey(0.001)
675 1 : }
676 1 : if genUpper {
677 1 : // Generate a new key with a .1% probability.
678 1 : upper = g.keyGenerator.RandKey(0.001)
679 1 : }
680 1 : if g.cmp(lower, upper) > 0 {
681 1 : lower, upper = upper, lower
682 1 : }
683 1 : if ensureLowerGE && g.cmp(iterLastOpts.upper, lower) > 0 {
684 1 : if attempts < 25 {
685 1 : continue
686 : }
687 1 : lower = iterLastOpts.upper
688 1 : upper = lower
689 1 : break
690 : }
691 1 : if ensureUpperLE && g.cmp(upper, iterLastOpts.lower) > 0 {
692 1 : if attempts < 25 {
693 1 : continue
694 : }
695 1 : upper = iterLastOpts.lower
696 1 : lower = upper
697 1 : break
698 : }
699 1 : break
700 : }
701 1 : newOpts.lower = lower
702 1 : newOpts.upper = upper
703 : }
704 1 : g.itersLastOpts[iterID] = newOpts
705 1 : g.add(&iterSetBoundsOp{
706 1 : iterID: iterID,
707 1 : lower: newOpts.lower,
708 1 : upper: newOpts.upper,
709 1 : })
710 1 : // Additionally seek the iterator in a manner consistent with the bounds,
711 1 : // and do some steps (Next/Prev). The seeking exercises typical
712 1 : // CockroachDB behavior when using iterators and the steps are trying to
713 1 : // stress the region near the bounds. Ideally, we should not do this as
714 1 : // part of generating a single op, but this is easier than trying to
715 1 : // control future op generation via generator state.
716 1 : doSeekLT := newOpts.upper != nil && g.rng.Float64() < 0.5
717 1 : doSeekGE := newOpts.lower != nil && g.rng.Float64() < 0.5
718 1 : if doSeekLT && doSeekGE {
719 1 : // Pick the seek.
720 1 : if g.rng.Float64() < 0.5 {
721 1 : doSeekGE = false
722 1 : } else {
723 1 : doSeekLT = false
724 1 : }
725 : }
726 1 : if doSeekLT {
727 1 : g.add(&iterSeekLTOp{
728 1 : iterID: iterID,
729 1 : key: newOpts.upper,
730 1 : derivedReaderID: g.iterReaderID[iterID],
731 1 : })
732 1 : if g.rng.Float64() < 0.5 {
733 1 : g.iterNext(iterID)
734 1 : }
735 1 : if g.rng.Float64() < 0.5 {
736 1 : g.iterNext(iterID)
737 1 : }
738 1 : if g.rng.Float64() < 0.5 {
739 1 : g.iterPrev(iterID)
740 1 : }
741 1 : } else if doSeekGE {
742 1 : g.add(&iterSeekGEOp{
743 1 : iterID: iterID,
744 1 : key: newOpts.lower,
745 1 : derivedReaderID: g.iterReaderID[iterID],
746 1 : })
747 1 : if g.rng.Float64() < 0.5 {
748 1 : g.iterPrev(iterID)
749 1 : }
750 1 : if g.rng.Float64() < 0.5 {
751 1 : g.iterPrev(iterID)
752 1 : }
753 1 : if g.rng.Float64() < 0.5 {
754 1 : g.iterNext(iterID)
755 1 : }
756 : }
757 : }
758 :
759 1 : func (g *generator) iterSetOptions(iterID objID) {
760 1 : opts := g.itersLastOpts[iterID]
761 1 : g.maybeMutateOptions(g.iterReaderID[iterID], &opts)
762 1 : g.itersLastOpts[iterID] = opts
763 1 : g.add(&iterSetOptionsOp{
764 1 : iterID: iterID,
765 1 : iterOpts: opts,
766 1 : derivedReaderID: g.iterReaderID[iterID],
767 1 : })
768 1 :
769 1 : // Additionally, perform a random absolute positioning operation. The
770 1 : // SetOptions contract requires one before the next relative positioning
771 1 : // operation. Ideally, we should not do this as part of generating a single
772 1 : // op, but this is easier than trying to control future op generation via
773 1 : // generator state.
774 1 : pickOneUniform(
775 1 : g.rng,
776 1 : []func(objID){
777 1 : g.iterFirst,
778 1 : g.iterLast,
779 1 : g.iterSeekGE,
780 1 : g.iterSeekGEWithLimit,
781 1 : g.iterSeekPrefixGE,
782 1 : g.iterSeekLT,
783 1 : g.iterSeekLTWithLimit,
784 1 : },
785 1 : )(iterID)
786 1 : }
787 :
788 1 : func (g *generator) iterSeekGE(iterID objID) {
789 1 : g.add(&iterSeekGEOp{
790 1 : iterID: iterID,
791 1 : key: g.keyGenerator.RandKey(0.001), // 0.1% new keys
792 1 : derivedReaderID: g.iterReaderID[iterID],
793 1 : })
794 1 : }
795 :
796 1 : func (g *generator) iterSeekGEWithLimit(iterID objID) {
797 1 : // 0.1% new keys
798 1 : key, limit := g.keyGenerator.RandKey(0.001), g.keyGenerator.RandKey(0.001)
799 1 : if g.cmp(key, limit) > 0 {
800 1 : key, limit = limit, key
801 1 : }
802 1 : g.add(&iterSeekGEOp{
803 1 : iterID: iterID,
804 1 : key: key,
805 1 : limit: limit,
806 1 : derivedReaderID: g.iterReaderID[iterID],
807 1 : })
808 : }
809 :
810 1 : func (g *generator) iterSeekPrefixGE(iterID objID) {
811 1 : lower := g.itersLastOpts[iterID].lower
812 1 : upper := g.itersLastOpts[iterID].upper
813 1 : iterCreationTimestamp := g.iterCreationTimestamp[iterID]
814 1 : var key []byte
815 1 :
816 1 : // We try to make sure that the SeekPrefixGE key is within the iter bounds,
817 1 : // and that the iter can read the key. If the key was created on a batch
818 1 : // which deleted the key, then the key will still be considered visible
819 1 : // by the current logic. We're also not accounting for keys written to
820 1 : // batches which haven't been persisted to the DB. But we're only picking
821 1 : // keys in a best effort manner, and the logic is better than picking a
822 1 : // random key.
823 1 : if g.rng.Intn(10) >= 1 {
824 1 : possibleKeys := make([][]byte, 0, 100)
825 1 : inRangeKeys := g.keyManager.InRangeKeysForObj(g.dbIDForObj(iterID), lower, upper)
826 1 : for _, keyMeta := range inRangeKeys {
827 1 : visibleHistory := keyMeta.history.before(iterCreationTimestamp)
828 1 :
829 1 : // Check if the last op on this key set a value (e.g. SETs, MERGEs).
830 1 : // If so, the key should be visible to the iterator and would make a
831 1 : // good candidate for a SeekPrefixGE.
832 1 : if visibleHistory.hasVisibleValue() {
833 1 : possibleKeys = append(possibleKeys, keyMeta.key)
834 1 : }
835 : }
836 :
837 1 : if len(possibleKeys) > 0 {
838 1 : key = possibleKeys[g.rng.Int31n(int32(len(possibleKeys)))]
839 1 : }
840 : }
841 :
842 1 : if key == nil {
843 1 : // TODO(bananabrick): We should try and use keys within the bounds,
844 1 : // even if we couldn't find any keys visible to the iterator. However,
845 1 : // doing this in experiments didn't really increase the valid
846 1 : // SeekPrefixGE calls by much.
847 1 : key = g.keyGenerator.RandKey(0) // 0% new keys
848 1 : }
849 :
850 1 : g.add(&iterSeekPrefixGEOp{
851 1 : iterID: iterID,
852 1 : key: key,
853 1 : derivedReaderID: g.iterReaderID[iterID],
854 1 : })
855 : }
856 :
857 1 : func (g *generator) iterSeekLT(iterID objID) {
858 1 : g.add(&iterSeekLTOp{
859 1 : iterID: iterID,
860 1 : key: g.keyGenerator.RandKey(0.001), // 0.1% new keys
861 1 : derivedReaderID: g.iterReaderID[iterID],
862 1 : })
863 1 : }
864 :
865 1 : func (g *generator) iterSeekLTWithLimit(iterID objID) {
866 1 : // 0.1% new keys
867 1 : key, limit := g.keyGenerator.RandKey(0.001), g.keyGenerator.RandKey(0.001)
868 1 : if g.cmp(limit, key) > 0 {
869 1 : key, limit = limit, key
870 1 : }
871 1 : g.add(&iterSeekLTOp{
872 1 : iterID: iterID,
873 1 : key: key,
874 1 : limit: limit,
875 1 : derivedReaderID: g.iterReaderID[iterID],
876 1 : })
877 : }
878 :
879 : // randIter performs partial func application ("currying"), returning a new
880 : // function that supplies the given func with a random iterator.
881 1 : func (g *generator) randIter(gen func(objID)) func() {
882 1 : return func() {
883 1 : if len(g.liveIters) == 0 {
884 1 : return
885 1 : }
886 1 : gen(g.liveIters.rand(g.rng))
887 : }
888 : }
889 :
890 1 : func (g *generator) iterFirst(iterID objID) {
891 1 : g.add(&iterFirstOp{
892 1 : iterID: iterID,
893 1 : derivedReaderID: g.iterReaderID[iterID],
894 1 : })
895 1 : }
896 :
897 1 : func (g *generator) iterLast(iterID objID) {
898 1 : g.add(&iterLastOp{
899 1 : iterID: iterID,
900 1 : derivedReaderID: g.iterReaderID[iterID],
901 1 : })
902 1 : }
903 :
904 1 : func (g *generator) iterNext(iterID objID) {
905 1 : g.add(&iterNextOp{
906 1 : iterID: iterID,
907 1 : derivedReaderID: g.iterReaderID[iterID],
908 1 : })
909 1 : }
910 :
911 1 : func (g *generator) iterPrev(iterID objID) {
912 1 : g.add(&iterPrevOp{
913 1 : iterID: iterID,
914 1 : derivedReaderID: g.iterReaderID[iterID],
915 1 : })
916 1 : }
917 :
918 1 : func (g *generator) iterNextWithLimit(iterID objID) {
919 1 : g.add(&iterNextOp{
920 1 : iterID: iterID,
921 1 : limit: g.keyGenerator.RandKey(0.001), // 0.1% new keys
922 1 : derivedReaderID: g.iterReaderID[iterID],
923 1 : })
924 1 : }
925 :
926 1 : func (g *generator) iterNextPrefix(iterID objID) {
927 1 : g.add(&iterNextPrefixOp{
928 1 : iterID: iterID,
929 1 : derivedReaderID: g.iterReaderID[iterID],
930 1 : })
931 1 : }
932 :
933 1 : func (g *generator) iterCanSingleDelete(iterID objID) {
934 1 : g.add(&iterCanSingleDelOp{
935 1 : iterID: iterID,
936 1 : derivedReaderID: g.iterReaderID[iterID],
937 1 : })
938 1 : }
939 :
940 1 : func (g *generator) iterPrevWithLimit(iterID objID) {
941 1 : g.add(&iterPrevOp{
942 1 : iterID: iterID,
943 1 : limit: g.keyGenerator.RandKey(0.001), // 0.1% new keys
944 1 : derivedReaderID: g.iterReaderID[iterID],
945 1 : })
946 1 : }
947 :
948 1 : func (g *generator) readerGet() {
949 1 : if len(g.liveReaders) == 0 {
950 0 : return
951 0 : }
952 :
953 1 : readerID := g.liveReaders.rand(g.rng)
954 1 :
955 1 : // If the chosen reader is a snapshot created with user-specified key
956 1 : // ranges, restrict the read to fall within one of the provided key ranges.
957 1 : var key []byte
958 1 : if bounds := g.snapshotBounds[readerID]; len(bounds) > 0 {
959 1 : kr := bounds[g.rng.Intn(len(bounds))]
960 1 : key = g.keyGenerator.RandKeyInRange(0.001, kr) // 0.1% new keys
961 1 : } else {
962 1 : key = g.keyGenerator.RandKey(0.001) // 0.1% new keys
963 1 : }
964 1 : derivedDBID := objID(0)
965 1 : if readerID.tag() == batchTag || readerID.tag() == snapTag {
966 1 : derivedDBID = g.deriveDB(readerID)
967 1 : }
968 1 : g.add(&getOp{readerID: readerID, key: key, derivedDBID: derivedDBID})
969 : }
970 :
971 1 : func (g *generator) replicate() {
972 1 : if len(g.dbs) < 2 {
973 0 : return
974 0 : }
975 :
976 1 : source := g.dbs.rand(g.rng)
977 1 : dest := source
978 1 : for dest == source {
979 1 : dest = g.dbs.rand(g.rng)
980 1 : }
981 :
982 1 : startKey, endKey := g.prefixKeyRange()
983 1 : g.add(&replicateOp{
984 1 : source: source,
985 1 : dest: dest,
986 1 : start: startKey,
987 1 : end: endKey,
988 1 : })
989 : }
990 :
991 : // generateDisjointKeyRanges generates n disjoint key ranges.
992 1 : func (g *generator) generateDisjointKeyRanges(n int) []pebble.KeyRange {
993 1 : keys := g.keyGenerator.UniqueKeys(2*n, func() []byte { return g.keyGenerator.RandPrefix(0.1) })
994 1 : keyRanges := make([]pebble.KeyRange, n)
995 1 : for i := range keyRanges {
996 1 : keyRanges[i] = pebble.KeyRange{
997 1 : Start: keys[i*2],
998 1 : End: keys[i*2+1],
999 1 : }
1000 1 : }
1001 1 : return keyRanges
1002 : }
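// A worked example with hypothetical prefixes: for n = 2, UniqueKeys might
// yield the four unique keys a, c, f, m, which pair up into the disjoint
// ranges [a, c) and [f, m). Disjointness relies on the generated keys being
// unique and in ascending order (an assumption about UniqueKeys here).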
1003 :
1004 1 : func (g *generator) newSnapshot() {
1005 1 : snapID := makeObjID(snapTag, g.init.snapshotSlots)
1006 1 : g.init.snapshotSlots++
1007 1 : g.liveSnapshots = append(g.liveSnapshots, snapID)
1008 1 : g.liveReaders = append(g.liveReaders, snapID)
1009 1 : dbID := g.dbs.rand(g.rng)
1010 1 : g.objDB[snapID] = dbID
1011 1 :
1012 1 : iters := make(objIDSet)
1013 1 : g.snapshots[snapID] = iters
1014 1 : g.readers[snapID] = iters
1015 1 :
1016 1 : s := &newSnapshotOp{
1017 1 : dbID: dbID,
1018 1 : snapID: snapID,
1019 1 : }
1020 1 :
1021 1 : // Impose bounds on the keys that may be read with the snapshot. Setting bounds
1022 1 : // allows some runs of the metamorphic test to use an EventuallyFileOnlySnapshot
1023 1 : // instead of a Snapshot, testing equivalence between the two for reads within
1024 1 : // those bounds.
1025 1 : s.bounds = g.generateDisjointKeyRanges(
1026 1 : 1 + g.expRandInt(3),
1027 1 : )
1028 1 : g.snapshotBounds[snapID] = s.bounds
1029 1 : g.add(s)
1030 1 : }
1031 :
1032 1 : func (g *generator) snapshotClose() {
1033 1 : if len(g.liveSnapshots) == 0 {
1034 1 : return
1035 1 : }
1036 :
1037 1 : snapID := g.liveSnapshots.rand(g.rng)
1038 1 : g.liveSnapshots.remove(snapID)
1039 1 : iters := g.snapshots[snapID]
1040 1 : delete(g.snapshots, snapID)
1041 1 : g.liveReaders.remove(snapID)
1042 1 : delete(g.readers, snapID)
1043 1 :
1044 1 : for _, id := range iters.sorted() {
1045 1 : g.liveIters.remove(id)
1046 1 : delete(g.iters, id)
1047 1 : g.add(&closeOp{objID: id})
1048 1 : }
1049 :
1050 1 : g.add(&closeOp{objID: snapID})
1051 : }
1052 :
1053 1 : func (g *generator) newExternalObj() {
1054 1 : if len(g.liveBatches) == 0 {
1055 1 : return
1056 1 : }
1057 1 : var batchID objID
1058 1 : // Try to find a suitable batch.
1059 1 : for i := 0; ; i++ {
1060 1 : if i == 10 {
1061 1 : return
1062 1 : }
1063 1 : batchID = g.liveBatches.rand(g.rng)
1064 1 : okm := g.keyManager.objKeyMeta(batchID)
1065 1 : // #3287: IngestExternalFiles currently doesn't support range keys.
1066 1 : if !okm.bounds.IsUnset() && !okm.hasRangeKeys {
1067 1 : break
1068 : }
1069 : }
1070 1 : g.removeBatchFromGenerator(batchID)
1071 1 : objID := makeObjID(externalObjTag, g.init.externalObjSlots)
1072 1 : g.init.externalObjSlots++
1073 1 : g.externalObjects = append(g.externalObjects, objID)
1074 1 : g.add(&newExternalObjOp{
1075 1 : batchID: batchID,
1076 1 : externalObjID: objID,
1077 1 : })
1078 : }
1079 :
1080 1 : func (g *generator) writerApply() {
1081 1 : if len(g.liveBatches) == 0 {
1082 1 : return
1083 1 : }
1084 1 : if len(g.liveWriters) < 2 {
1085 0 : panic(fmt.Sprintf("insufficient liveWriters (%d) to apply batch", len(g.liveWriters)))
1086 : }
1087 :
1088 1 : batchID := g.liveBatches.rand(g.rng)
1089 1 : dbID := g.dbIDForObj(batchID)
1090 1 :
1091 1 : var writerID objID
1092 1 : for {
1093 1 : // NB: The writer we're applying to, as well as the batch we're applying,
1094 1 : // must be from the same DB. The writer could be the db itself. Applying
1095 1 : // a batch from one DB on another DB results in a panic, so avoid that.
1096 1 : writerID = g.liveWriters.rand(g.rng)
1097 1 : writerDBID := writerID
1098 1 : if writerID.tag() != dbTag {
1099 1 : writerDBID = g.dbIDForObj(writerID)
1100 1 : }
1101 1 : if writerID != batchID && writerDBID == dbID {
1102 1 : break
1103 : }
1104 : }
1105 :
1106 : // The batch we're applying may contain single delete tombstones that when
1107 : // applied to the writer result in nondeterminism in the deleted key. If
1108 : // that's the case, we can restore determinism by first deleting the key
1109 : // from the writer.
1110 : //
1111 : // Generating additional operations here is not ideal, but it simplifies
1112 : // single delete invariants significantly.
1113 1 : singleDeleteConflicts := g.keyManager.checkForSingleDelConflicts(batchID, writerID, false /* collapsed */)
1114 1 : for _, conflict := range singleDeleteConflicts {
1115 0 : g.add(&deleteOp{
1116 0 : writerID: writerID,
1117 0 : key: conflict,
1118 0 : derivedDBID: dbID,
1119 0 : })
1120 0 : }
1121 :
1122 1 : g.removeBatchFromGenerator(batchID)
1123 1 :
1124 1 : g.add(&applyOp{
1125 1 : writerID: writerID,
1126 1 : batchID: batchID,
1127 1 : })
1128 1 : g.add(&closeOp{
1129 1 : objID: batchID,
1130 1 : })
1131 : }
1132 :
1133 1 : func (g *generator) writerDelete() {
1134 1 : if len(g.liveWriters) == 0 {
1135 0 : return
1136 0 : }
1137 :
1138 1 : writerID := g.liveWriters.rand(g.rng)
1139 1 : derivedDBID := writerID
1140 1 : if derivedDBID.tag() != dbTag {
1141 1 : derivedDBID = g.dbIDForObj(writerID)
1142 1 : }
1143 1 : g.add(&deleteOp{
1144 1 : writerID: writerID,
1145 1 : key: g.keyGenerator.RandKey(0.001), // 0.1% new keys
1146 1 : derivedDBID: derivedDBID,
1147 1 : })
1148 : }
1149 :
1150 1 : func (g *generator) writerDeleteRange() {
1151 1 : if len(g.liveWriters) == 0 {
1152 0 : return
1153 0 : }
1154 :
1155 1 : keys := g.keyGenerator.UniqueKeys(2, func() []byte { return g.keyGenerator.RandKey(0.001) })
1156 1 : start, end := keys[0], keys[1]
1157 1 :
1158 1 : writerID := g.liveWriters.rand(g.rng)
1159 1 : g.add(&deleteRangeOp{
1160 1 : writerID: writerID,
1161 1 : start: start,
1162 1 : end: end,
1163 1 : })
1164 : }
1165 :
1166 1 : func (g *generator) writerRangeKeyDelete() {
1167 1 : if len(g.liveWriters) == 0 {
1168 0 : return
1169 0 : }
1170 1 : start, end := g.prefixKeyRange()
1171 1 :
1172 1 : writerID := g.liveWriters.rand(g.rng)
1173 1 : g.add(&rangeKeyDeleteOp{
1174 1 : writerID: writerID,
1175 1 : start: start,
1176 1 : end: end,
1177 1 : })
1178 : }
1179 :
1180 1 : func (g *generator) writerRangeKeySet() {
1181 1 : if len(g.liveWriters) == 0 {
1182 0 : return
1183 0 : }
1184 1 : start, end := g.prefixKeyRange()
1185 1 :
1186 1 : // 90% of the time, set a suffix.
1187 1 : var suffix []byte
1188 1 : if g.rng.Float64() < 0.90 {
1189 1 : // Increase the max suffix 5% of the time.
1190 1 : suffix = g.keyGenerator.SkewedSuffix(0.05)
1191 1 : }
1192 :
1193 1 : writerID := g.liveWriters.rand(g.rng)
1194 1 : g.add(&rangeKeySetOp{
1195 1 : writerID: writerID,
1196 1 : start: start,
1197 1 : end: end,
1198 1 : suffix: suffix,
1199 1 : value: randBytes(g.rng, 0, maxValueSize),
1200 1 : })
1201 : }
1202 :
1203 1 : func (g *generator) writerRangeKeyUnset() {
1204 1 : if len(g.liveWriters) == 0 {
1205 0 : return
1206 0 : }
1207 1 : start, end := g.prefixKeyRange()
1208 1 :
1209 1 : // 90% of the time, set a suffix.
1210 1 : var suffix []byte
1211 1 : if g.rng.Float64() < 0.90 {
1212 1 : // Increase the max suffix 5% of the time.
1213 1 : suffix = g.keyGenerator.SkewedSuffix(0.05)
1214 1 : }
1215 :
1216 : // TODO(jackson): Increase probability of effective unsets? Purely random
1217 : // unsets are unlikely to remove an active range key.
1218 :
1219 1 : writerID := g.liveWriters.rand(g.rng)
1220 1 : g.add(&rangeKeyUnsetOp{
1221 1 : writerID: writerID,
1222 1 : start: start,
1223 1 : end: end,
1224 1 : suffix: suffix,
1225 1 : })
1226 : }
1227 :
1228 1 : func (g *generator) writerIngest() {
1229 1 : if len(g.liveBatches) == 0 {
1230 1 : return
1231 1 : }
1232 :
1233 1 : dbID := g.dbs.rand(g.rng)
1234 1 : n := min(1+g.expRandInt(1), len(g.liveBatches))
1235 1 : batchIDs := make([]objID, n)
1236 1 : derivedDBIDs := make([]objID, n)
1237 1 : for i := 0; i < n; i++ {
1238 1 : batchID := g.liveBatches.rand(g.rng)
1239 1 : batchIDs[i] = batchID
1240 1 : derivedDBIDs[i] = g.dbIDForObj(batchID)
1241 1 : g.removeBatchFromGenerator(batchID)
1242 1 : }
1243 :
1244 : // An ingestion may fail if the ingested sstables overlap one another.
1245 : // Either it succeeds and its keys are committed to the DB, or it fails and
1246 : // the keys are not committed.
1247 1 : if !g.keyManager.doObjectBoundsOverlap(batchIDs) {
1248 1 : // This ingestion will succeed.
1249 1 : //
1250 1 : // The batches we're ingesting may contain single delete tombstones that
1251 1 : // when applied to the writer result in nondeterminism in the deleted key.
1252 1 : // If that's the case, we can restore determinism by first deleting the keys
1253 1 : // from the writer.
1254 1 : //
1255 1 : // Generating additional operations here is not ideal, but it simplifies
1256 1 : // single delete invariants significantly.
1257 1 : for _, batchID := range batchIDs {
1258 1 : singleDeleteConflicts := g.keyManager.checkForSingleDelConflicts(batchID, dbID, true /* collapsed */)
1259 1 : for _, conflict := range singleDeleteConflicts {
1260 1 : g.add(&deleteOp{
1261 1 : writerID: dbID,
1262 1 : key: conflict,
1263 1 : derivedDBID: dbID,
1264 1 : })
1265 1 : }
1266 : }
1267 : }
1268 1 : g.add(&ingestOp{
1269 1 : dbID: dbID,
1270 1 : batchIDs: batchIDs,
1271 1 : derivedDBIDs: derivedDBIDs,
1272 1 : })
1273 : }
1274 :
1275 1 : func (g *generator) writerIngestAndExcise() {
1276 1 : if len(g.liveBatches) == 0 {
1277 1 : return
1278 1 : }
1279 :
1280 1 : dbID := g.dbs.rand(g.rng)
1281 1 : batchID := g.liveBatches.rand(g.rng)
1282 1 : g.removeBatchFromGenerator(batchID)
1283 1 :
1284 1 : start, end := g.prefixKeyRange()
1285 1 : derivedDBID := g.dbIDForObj(batchID)
1286 1 :
1287 1 : // Check for any single delete conflicts. If this batch is single-deleting
1288 1 : // a key that isn't safe to single delete in the underlying db, _and_ this
1289 1 : // key is not in the excise span, we add a delete before the ingestAndExcise.
1290 1 : singleDeleteConflicts := g.keyManager.checkForSingleDelConflicts(batchID, dbID, true /* collapsed */)
1291 1 : for _, conflict := range singleDeleteConflicts {
1292 0 : if g.cmp(conflict, start) >= 0 && g.cmp(conflict, end) < 0 {
1293 0 : // This key will get excised anyway.
1294 0 : continue
1295 : }
1296 0 : g.add(&deleteOp{
1297 0 : writerID: dbID,
1298 0 : key: conflict,
1299 0 : derivedDBID: dbID,
1300 0 : })
1301 : }
1302 :
1303 1 : g.add(&ingestAndExciseOp{
1304 1 : dbID: dbID,
1305 1 : batchID: batchID,
1306 1 : derivedDBID: derivedDBID,
1307 1 : exciseStart: start,
1308 1 : exciseEnd: end,
1309 1 : sstContainsExciseTombstone: g.rng.Intn(2) == 0,
1310 1 : })
1311 : }
1312 :
1313 1 : func (g *generator) writerIngestExternalFiles() {
1314 1 : if len(g.externalObjects) == 0 {
1315 1 : return
1316 1 : }
1317 1 : dbID := g.dbs.rand(g.rng)
1318 1 : numFiles := 1 + g.expRandInt(1)
1319 1 : objs := make([]externalObjWithBounds, numFiles)
1320 1 :
1321 1 : // We generate the parameters in multiple passes:
1322 1 : // 1. Generate objs with random start and end keys. Their bounds can overlap.
1323 1 : // 2. Sort objects by the start bound and trim the bounds to remove overlap.
1324 1 : // 3. Remove any objects where the previous step resulted in empty bounds.
1325 1 : // 4. Randomly add synthetic suffixes. (Steps 2-3 are illustrated in a worked example after this function.)
1326 1 :
1327 1 : for i := range objs {
1328 1 : // We allow the same object to be selected multiple times.
1329 1 : id := g.externalObjects.rand(g.rng)
1330 1 : b := g.keyManager.objKeyMeta(id).bounds
1331 1 :
1332 1 : objStart := g.prefix(b.smallest)
1333 1 : objEnd := g.prefix(b.largest)
1334 1 : if !b.largestExcl || len(objEnd) != len(b.largest) {
1335 1 : // Move up the end key a bit by appending a few letters to the prefix.
1336 1 : objEnd = append(objEnd, randBytes(g.rng, 1, 3)...)
1337 1 : }
1338 1 : if g.cmp(objStart, objEnd) >= 0 {
1339 0 : panic("bug in generating obj bounds")
1340 : }
1341 : // Generate two random keys within the given bounds.
1342 : // First, generate a start key in the range [objStart, objEnd).
1343 1 : start := g.keyGenerator.RandKeyInRange(0.01, pebble.KeyRange{
1344 1 : Start: objStart,
1345 1 : End: objEnd,
1346 1 : })
1347 1 : start = g.prefix(start)
1348 1 : // Second, generate an end key in the range (start, objEnd]. To do this, we
1349 1 : // generate a key in the range [start, objEnd) and if we get `start`, we
1350 1 : // remap that to `objEnd`.
1351 1 : end := g.keyGenerator.RandKeyInRange(0.01, pebble.KeyRange{
1352 1 : Start: start,
1353 1 : End: objEnd,
1354 1 : })
1355 1 : end = g.prefix(end)
1356 1 : if g.cmp(start, end) == 0 {
1357 1 : end = objEnd
1358 1 : }
1359 : // Randomly set up synthetic prefix.
1360 1 : var syntheticPrefix sstable.SyntheticPrefix
1361 1 : // We can only use a synthetic prefix if we don't have range dels.
1362 1 : // TODO(radu): we will want to support this at some point.
1363 1 : if !g.keyManager.objKeyMeta(id).hasRangeDels && g.rng.Intn(2) == 0 {
1364 1 : syntheticPrefix = randBytes(g.rng, 1, 5)
1365 1 : start = syntheticPrefix.Apply(start)
1366 1 : end = syntheticPrefix.Apply(end)
1367 1 : }
1368 :
1369 1 : objs[i] = externalObjWithBounds{
1370 1 : externalObjID: id,
1371 1 : bounds: pebble.KeyRange{
1372 1 : Start: start,
1373 1 : End: end,
1374 1 : },
1375 1 : syntheticPrefix: syntheticPrefix,
1376 1 : }
1377 : }
1378 :
1379 : // Sort by start bound.
1380 1 : slices.SortFunc(objs, func(a, b externalObjWithBounds) int {
1381 1 : return g.cmp(a.bounds.Start, b.bounds.Start)
1382 1 : })
1383 :
1384 : // Trim bounds so that there is no overlap.
1385 1 : for i := 0; i < len(objs)-1; i++ {
1386 1 : if g.cmp(objs[i].bounds.End, objs[i+1].bounds.Start) > 0 {
1387 1 : objs[i].bounds.End = objs[i+1].bounds.Start
1388 1 : }
1389 : }
1390 : // Some bounds might be empty now; remove those objects altogether. Note that
1391 : // the last object is unmodified, so at least that object will remain.
1392 1 : objs = slices.DeleteFunc(objs, func(o externalObjWithBounds) bool {
1393 1 : return g.cmp(o.bounds.Start, o.bounds.End) >= 0
1394 1 : })
1395 :
1396 : // Randomly set synthetic suffixes.
1397 1 : for i := range objs {
1398 1 : if g.rng.Intn(2) == 0 {
1399 1 : // We can only use a synthetic suffix if we don't have range dels.
1400 1 : // TODO(radu): we will want to support this at some point.
1401 1 : if g.keyManager.objKeyMeta(objs[i].externalObjID).hasRangeDels {
1402 0 : continue
1403 : }
1404 :
1405 : // We can only use a synthetic suffix if we don't have multiple keys with
1406 : // the same prefix.
1407 1 : hasDuplicatePrefix := func() bool {
1408 1 : var prevPrefix []byte
1409 1 : for _, k := range g.keyManager.KeysForExternalIngest(objs[i]) {
1410 1 : prefix := g.prefix(k.key)
1411 1 : if g.cmp(prefix, prevPrefix) == 0 {
1412 0 : return true
1413 0 : }
1414 1 : prevPrefix = append(prevPrefix[:0], prefix...)
1415 : }
1416 1 : return false
1417 : }()
1418 1 : if hasDuplicatePrefix {
1419 0 : continue
1420 : }
1421 :
1422 : // Generate a suffix that sorts before any previously generated suffix.
1423 1 : objs[i].syntheticSuffix = g.keyGenerator.IncMaxSuffix()
1424 : }
1425 : }
1426 :
1427 : // The batches we're ingesting may contain single delete tombstones that when
1428 : // applied to the db result in nondeterminism in the deleted key. If that's
1429 : // the case, we can restore determinism by first deleting the keys from the
1430 : // db.
1431 : //
1432 : // Generating additional operations here is not ideal, but it simplifies
1433 : // single delete invariants significantly.
1434 1 : dbKeys := g.keyManager.objKeyMeta(dbID)
1435 1 : for _, o := range objs {
1436 1 : for _, src := range g.keyManager.KeysForExternalIngest(o) {
1437 1 : if g.keyManager.checkForSingleDelConflict(src, dbKeys) {
1438 0 : g.add(&deleteOp{
1439 0 : writerID: dbID,
1440 0 : key: src.key,
1441 0 : derivedDBID: dbID,
1442 0 : })
1443 0 : }
1444 : }
1445 : }
1446 :
1447 : // Shuffle the objects.
1448 1 : g.rng.Shuffle(len(objs), func(i, j int) {
1449 1 : objs[i], objs[j] = objs[j], objs[i]
1450 1 : })
1451 :
1452 1 : g.add(&ingestExternalFilesOp{
1453 1 : dbID: dbID,
1454 1 : objs: objs,
1455 1 : })
1456 : }
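// A worked example of the bound trimming in steps 2-3, using hypothetical
// prefixes: suppose three selected objects start with bounds [a, f), [c, h),
// and [g, k). After sorting by start key and trimming each End down to the
// following Start, the bounds become [a, c), [c, g), and [g, k). Any object
// whose bounds collapse to empty is dropped; the last object is never
// trimmed, so at least one object always survives.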
1457 :
1458 1 : func (g *generator) writerLogData() {
1459 1 : if len(g.liveWriters) == 0 {
1460 0 : return
1461 0 : }
1462 1 : g.add(&logDataOp{
1463 1 : writerID: g.liveWriters.rand(g.rng),
1464 1 : data: randBytes(g.rng, 0, g.expRandInt(10)),
1465 1 : })
1466 : }
1467 :
1468 1 : func (g *generator) writerMerge() {
1469 1 : if len(g.liveWriters) == 0 {
1470 0 : return
1471 0 : }
1472 :
1473 1 : writerID := g.liveWriters.rand(g.rng)
1474 1 : g.add(&mergeOp{
1475 1 : writerID: writerID,
1476 1 : // 20% new keys.
1477 1 : key: g.keyGenerator.RandKey(0.2),
1478 1 : value: randBytes(g.rng, 0, maxValueSize),
1479 1 : })
1480 : }
1481 :
1482 1 : func (g *generator) writerSet() {
1483 1 : if len(g.liveWriters) == 0 {
1484 0 : return
1485 0 : }
1486 :
1487 1 : writerID := g.liveWriters.rand(g.rng)
1488 1 : g.add(&setOp{
1489 1 : writerID: writerID,
1490 1 : // 50% new keys.
1491 1 : key: g.keyGenerator.RandKey(0.5),
1492 1 : value: randBytes(g.rng, 0, maxValueSize),
1493 1 : })
1494 : }
1495 :
1496 1 : func (g *generator) writerSingleDelete() {
1497 1 : if len(g.liveWriters) == 0 {
1498 0 : return
1499 0 : }
1500 :
1501 1 : writerID := g.liveWriters.rand(g.rng)
1502 1 : key := g.randKeyToSingleDelete(writerID)
1503 1 : if key == nil {
1504 0 : return
1505 0 : }
1506 1 : g.add(&singleDeleteOp{
1507 1 : writerID: writerID,
1508 1 : key: key,
1509 1 : // Keys eligible for single deletes can be removed with a regular
1510 1 : // delete. Mutate a percentage of SINGLEDEL ops into DELETEs. Note that
1511 1 : // here we are only determining whether the replacement *could* happen.
1512 1 : // At test runtime, the `replaceSingleDelete` test option must also be
1513 1 : // set to true for the single delete to be replaced.
1514 1 : maybeReplaceDelete: g.rng.Float64() < 0.25,
1515 1 : })
1516 : }
1517 :
1518 1 : func (g *generator) maybeMutateOptions(readerID objID, opts *iterOpts) {
1519 1 : // With 95% probability, allow changes to any options at all. This ensures
1520 1 : // that in 5% of cases there are no changes, and SetOptions hits its fast
1521 1 : // path.
1522 1 : if g.rng.Intn(100) >= 5 {
1523 1 : if !g.maybeSetSnapshotIterBounds(readerID, opts) {
1524 1 : // With 1/3 probability, clear existing bounds.
1525 1 : if opts.lower != nil && g.rng.Intn(3) == 0 {
1526 1 : opts.lower = nil
1527 1 : }
1528 1 : if opts.upper != nil && g.rng.Intn(3) == 0 {
1529 1 : opts.upper = nil
1530 1 : }
1531 : // With 1/3 probability, update the bounds.
1532 1 : if g.rng.Intn(3) == 0 {
1533 1 : // Generate a new key with a .1% probability.
1534 1 : opts.lower = g.keyGenerator.RandKey(0.001)
1535 1 : }
1536 1 : if g.rng.Intn(3) == 0 {
1537 1 : // Generate a new key with a .1% probability.
1538 1 : opts.upper = g.keyGenerator.RandKey(0.001)
1539 1 : }
1540 1 : if g.cmp(opts.lower, opts.upper) > 0 {
1541 1 : opts.lower, opts.upper = opts.upper, opts.lower
1542 1 : }
1543 : }
1544 :
1545 : // With 1/3 probability, update the key-types/mask.
1546 1 : if g.rng.Intn(3) == 0 {
1547 1 : opts.keyTypes, opts.maskSuffix = g.randKeyTypesAndMask()
1548 1 : }
1549 :
1550 : // With 1/3 probability, clear existing filter.
1551 1 : if opts.filterMax > 0 && g.rng.Intn(3) == 0 {
1552 1 : opts.filterMax, opts.filterMin = 0, 0
1553 1 : }
1554 : // With 10% probability, set a filter range.
1555 1 : if g.rng.Intn(10) == 1 {
1556 1 : opts.filterMin = uint64(g.keyGenerator.UniformSuffixInt() + 1)
1557 1 : opts.filterMax = uint64(g.keyGenerator.UniformSuffixInt() + 1)
1558 1 : if opts.filterMin > opts.filterMax {
1559 1 : opts.filterMin, opts.filterMax = opts.filterMax, opts.filterMin
1560 1 : } else if opts.filterMin == opts.filterMax {
1561 1 : opts.filterMax = opts.filterMin + 1
1562 1 : }
1563 : }
1564 : // With 10% probability, flip enablement of L6 filters.
1565 1 : if g.rng.Float64() <= 0.1 {
1566 1 : opts.useL6Filters = !opts.useL6Filters
1567 1 : }
1568 : }
1569 : }
1570 :
1571 1 : func (g *generator) cmp(a, b []byte) int {
1572 1 : return g.keyManager.comparer.Compare(a, b)
1573 1 : }
1574 :
1575 1 : func (g *generator) prefix(a []byte) []byte {
1576 1 : n := g.keyManager.comparer.Split(a)
1577 1 : return a[:n:n]
1578 1 : }
1579 :
1580 0 : func (g *generator) String() string {
1581 0 : var buf bytes.Buffer
1582 0 : for _, op := range g.ops {
1583 0 : fmt.Fprintf(&buf, "%s\n", op)
1584 0 : }
1585 0 : return buf.String()
1586 : }
1587 :
1588 : // expRandInt returns a random non-negative integer using the exponential
1589 : // distribution with the given mean. This is useful when we usually want to test
1590 : // with small values, but we want to occasionally test with a larger value.
1591 : //
1592 : // Large integers are exponentially less likely than small integers;
1593 : // specifically, the probability decreases by a factor of `e` every `mean`
1594 : // values.
1595 1 : func (g *generator) expRandInt(mean int) int {
1596 1 : return int(math.Round(g.rng.ExpFloat64() * float64(mean)))
1597 1 : }
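// A worked example of expRandInt's distribution: for an exponential variable
// X with mean m, P(X > x) = e^(-x/m). With mean = 1 (as used for checkpoint
// spans above), about 39% of draws fall below 0.5 and round to 0
// (1 - e^(-0.5) ≈ 0.39), while P(X > 5) = e^(-5) ≈ 0.7%, so large values
// occur but are rare.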
|