LCOV - code coverage report
Current view: top level - pebble/metamorphic - generator.go (source / functions) Hit Total Coverage
Test: 2024-01-18 08:16Z 4b750a69 - meta test only.lcov Lines: 4 1248 0.3 %
Date: 2024-01-18 08:17:16 Functions: 0 0 -

          Line data    Source code
       1             : // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
       2             : // of this source code is governed by a BSD-style license that can be found in
       3             : // the LICENSE file.
       4             : 
       5             : package metamorphic
       6             : 
       7             : import (
       8             :         "bytes"
       9             :         "fmt"
      10             :         "os"
      11             :         "slices"
      12             : 
      13             :         "github.com/cockroachdb/pebble"
      14             :         "github.com/cockroachdb/pebble/internal/randvar"
      15             :         "github.com/cockroachdb/pebble/internal/testkeys"
      16             :         "golang.org/x/exp/rand"
      17             : )
      18             : 
      19             : const maxValueSize = 20
      20             : 
      21             : type iterOpts struct {
      22             :         lower    []byte
      23             :         upper    []byte
      24             :         keyTypes uint32 // pebble.IterKeyType
      25             :         // maskSuffix may be set if keyTypes is IterKeyTypePointsAndRanges to
      26             :         // configure IterOptions.RangeKeyMasking.Suffix.
      27             :         maskSuffix []byte
      28             : 
      29             :         // If filterMax is >0, this iterator will filter out any keys that have
      30             :         // suffixes that don't fall within the range [filterMin,filterMax).
      31             :         // Additionally, the iterator will be constructed with a block-property
      32             :         // filter that filters out blocks accordingly. Not all OPTIONS hook up the
      33             :         // corresponding block property collector, so block-filtering may still be
      34             :         // effectively disabled in some runs. The iterator operations themselves
      35             :         // however will always skip past any points that should be filtered to
      36             :         // ensure determinism.
      37             :         filterMin uint64
      38             :         filterMax uint64
      39             : 
      40             :         // see IterOptions.UseL6Filters.
      41             :         useL6Filters bool
      42             : 
      43             :         // NB: If adding or removing fields, ensure IsZero is in sync.
      44             : }
      45             : 
      46           1 : func (o iterOpts) IsZero() bool {
      47           1 :         return o.lower == nil && o.upper == nil && o.keyTypes == 0 &&
      48           1 :                 o.maskSuffix == nil && o.filterMin == 0 && o.filterMax == 0 && !o.useL6Filters
      49           1 : }
      50             : 
      51             : // GenerateOps generates n random operations, drawing randomness from the
      52             : // provided pseudorandom generator and using cfg to determine the distribution
      53             : // of op types.
      54           0 : func GenerateOps(rng *rand.Rand, n uint64, cfg OpConfig) Ops {
      55           0 :         // Generate a new set of random ops, writing them to <dir>/ops. These will be
      56           0 :         // read by the child processes when performing a test run.
      57           0 :         return generate(rng, n, cfg, newKeyManager(1 /* num instances */))
      58           0 : }
      59             : 
      60             : type generator struct {
      61             :         cfg OpConfig
      62             :         rng *rand.Rand
      63             : 
      64             :         init *initOp
      65             :         ops  []op
      66             : 
       67             :         // keyManager tracks the state of keys at operation generation time.
      68             :         keyManager *keyManager
      69             :         dbs        objIDSlice
       70             :         // Unordered sets of object IDs for live objects. Used to randomly select an
      71             :         // object when generating an operation. There are 4 concrete objects: the DB
      72             :         // (of which there is exactly 1), batches, iterators, and snapshots.
      73             :         //
      74             :         // liveBatches contains the live indexed and write-only batches.
      75             :         liveBatches objIDSlice
      76             :         // liveIters contains the live iterators.
      77             :         liveIters     objIDSlice
      78             :         itersLastOpts map[objID]iterOpts
      79             :         // liveReaders contains the DB, and any live indexed batches and snapshots. The DB is always
      80             :         // at index 0.
      81             :         liveReaders objIDSlice
      82             :         // liveSnapshots contains the live snapshots.
      83             :         liveSnapshots objIDSlice
      84             :         // liveWriters contains the DB, and any live batches. The DB is always at index 0.
      85             :         liveWriters objIDSlice
      86             : 
      87             :         // Maps used to find associated objects during generation. These maps are not
      88             :         // needed during test execution.
      89             :         //
      90             :         // batchID -> batch iters: used to keep track of the open iterators on an
      91             :         // indexed batch. The iter set value will also be indexed by the readers map.
      92             :         batches map[objID]objIDSet
      93             :         // iterID -> reader iters: used to keep track of all of the open
      94             :         // iterators. The iter set value will also be indexed by either the batches
      95             :         // or snapshots maps.
      96             :         iters map[objID]objIDSet
      97             :         // objectID -> db: used to keep track of the DB a batch, iter, or snapshot
      98             :         // was created on.
      99             :         objDB map[objID]objID
     100             :         // readerID -> reader iters: used to keep track of the open iterators on a
     101             :         // reader. The iter set value will also be indexed by either the batches or
     102             :         // snapshots maps. This map is the union of batches and snapshots maps.
     103             :         readers map[objID]objIDSet
     104             :         // snapshotID -> snapshot iters: used to keep track of the open iterators on
     105             :         // a snapshot. The iter set value will also be indexed by the readers map.
     106             :         snapshots map[objID]objIDSet
     107             :         // snapshotID -> bounds of the snapshot: only populated for snapshots that
     108             :         // are constrained by bounds.
     109             :         snapshotBounds map[objID][]pebble.KeyRange
      110             :         // iterCreationTimestamp is the metaTimestamp at which the iter was created.
     111             :         iterCreationTimestamp map[objID]int
     112             :         // iterReaderID is a map from an iterID to a readerID.
     113             :         iterReaderID map[objID]objID
     114             : }
     115             : 
     116           0 : func newGenerator(rng *rand.Rand, cfg OpConfig, km *keyManager) *generator {
     117           0 :         g := &generator{
     118           0 :                 cfg:                   cfg,
     119           0 :                 rng:                   rng,
     120           0 :                 init:                  &initOp{dbSlots: uint32(cfg.numInstances)},
     121           0 :                 keyManager:            km,
     122           0 :                 liveReaders:           objIDSlice{makeObjID(dbTag, 1)},
     123           0 :                 liveWriters:           objIDSlice{makeObjID(dbTag, 1)},
     124           0 :                 dbs:                   objIDSlice{makeObjID(dbTag, 1)},
     125           0 :                 objDB:                 make(map[objID]objID),
     126           0 :                 batches:               make(map[objID]objIDSet),
     127           0 :                 iters:                 make(map[objID]objIDSet),
     128           0 :                 readers:               make(map[objID]objIDSet),
     129           0 :                 snapshots:             make(map[objID]objIDSet),
     130           0 :                 snapshotBounds:        make(map[objID][]pebble.KeyRange),
     131           0 :                 itersLastOpts:         make(map[objID]iterOpts),
     132           0 :                 iterCreationTimestamp: make(map[objID]int),
     133           0 :                 iterReaderID:          make(map[objID]objID),
     134           0 :         }
     135           0 :         for i := 1; i < cfg.numInstances; i++ {
     136           0 :                 g.liveReaders = append(g.liveReaders, makeObjID(dbTag, uint32(i+1)))
     137           0 :                 g.liveWriters = append(g.liveWriters, makeObjID(dbTag, uint32(i+1)))
     138           0 :                 g.dbs = append(g.dbs, makeObjID(dbTag, uint32(i+1)))
     139           0 :         }
     140             :         // Note that the initOp fields are populated during generation.
     141           0 :         g.ops = append(g.ops, g.init)
     142           0 :         return g
     143             : }
     144             : 
     145           0 : func generate(rng *rand.Rand, count uint64, cfg OpConfig, km *keyManager) []op {
     146           0 :         g := newGenerator(rng, cfg, km)
     147           0 : 
     148           0 :         generators := []func(){
     149           0 :                 OpBatchAbort:                  g.batchAbort,
     150           0 :                 OpBatchCommit:                 g.batchCommit,
     151           0 :                 OpDBCheckpoint:                g.dbCheckpoint,
     152           0 :                 OpDBCompact:                   g.dbCompact,
     153           0 :                 OpDBFlush:                     g.dbFlush,
     154           0 :                 OpDBRatchetFormatMajorVersion: g.dbRatchetFormatMajorVersion,
     155           0 :                 OpDBRestart:                   g.dbRestart,
     156           0 :                 OpIterClose:                   g.randIter(g.iterClose),
     157           0 :                 OpIterFirst:                   g.randIter(g.iterFirst),
     158           0 :                 OpIterLast:                    g.randIter(g.iterLast),
     159           0 :                 OpIterNext:                    g.randIter(g.iterNext),
     160           0 :                 OpIterNextWithLimit:           g.randIter(g.iterNextWithLimit),
     161           0 :                 OpIterNextPrefix:              g.randIter(g.iterNextPrefix),
     162           0 :                 OpIterCanSingleDelete:         g.randIter(g.iterCanSingleDelete),
     163           0 :                 OpIterPrev:                    g.randIter(g.iterPrev),
     164           0 :                 OpIterPrevWithLimit:           g.randIter(g.iterPrevWithLimit),
     165           0 :                 OpIterSeekGE:                  g.randIter(g.iterSeekGE),
     166           0 :                 OpIterSeekGEWithLimit:         g.randIter(g.iterSeekGEWithLimit),
     167           0 :                 OpIterSeekLT:                  g.randIter(g.iterSeekLT),
     168           0 :                 OpIterSeekLTWithLimit:         g.randIter(g.iterSeekLTWithLimit),
     169           0 :                 OpIterSeekPrefixGE:            g.randIter(g.iterSeekPrefixGE),
     170           0 :                 OpIterSetBounds:               g.randIter(g.iterSetBounds),
     171           0 :                 OpIterSetOptions:              g.randIter(g.iterSetOptions),
     172           0 :                 OpNewBatch:                    g.newBatch,
     173           0 :                 OpNewIndexedBatch:             g.newIndexedBatch,
     174           0 :                 OpNewIter:                     g.newIter,
     175           0 :                 OpNewIterUsingClone:           g.newIterUsingClone,
     176           0 :                 OpNewSnapshot:                 g.newSnapshot,
     177           0 :                 OpReaderGet:                   g.readerGet,
     178           0 :                 OpReplicate:                   g.replicate,
     179           0 :                 OpSnapshotClose:               g.snapshotClose,
     180           0 :                 OpWriterApply:                 g.writerApply,
     181           0 :                 OpWriterDelete:                g.writerDelete,
     182           0 :                 OpWriterDeleteRange:           g.writerDeleteRange,
     183           0 :                 OpWriterIngest:                g.writerIngest,
     184           0 :                 OpWriterIngestAndExcise:       g.writerIngestAndExcise,
     185           0 :                 OpWriterMerge:                 g.writerMerge,
     186           0 :                 OpWriterRangeKeyDelete:        g.writerRangeKeyDelete,
     187           0 :                 OpWriterRangeKeySet:           g.writerRangeKeySet,
     188           0 :                 OpWriterRangeKeyUnset:         g.writerRangeKeyUnset,
     189           0 :                 OpWriterSet:                   g.writerSet,
     190           0 :                 OpWriterSingleDelete:          g.writerSingleDelete,
     191           0 :         }
     192           0 : 
     193           0 :         // TPCC-style deck of cards randomization. Every time the end of the deck is
     194           0 :         // reached, we shuffle the deck.
     195           0 :         deck := randvar.NewDeck(g.rng, cfg.ops[:]...)
     196           0 : 
     197           0 :         defer func() {
     198           0 :                 if r := recover(); r != nil {
     199           0 :                         fmt.Fprintln(os.Stderr, formatOps(g.ops))
     200           0 :                         panic(r)
     201             :                 }
     202             :         }()
     203           0 :         for i := uint64(0); i < count; i++ {
     204           0 :                 generators[deck.Int()]()
     205           0 :         }
     206             : 
     207           0 :         g.dbClose()
     208           0 : 
     209           0 :         computeDerivedFields(g.ops)
     210           0 :         return g.ops
     211             : }
     212             : 
     213           0 : func (g *generator) add(op op) {
     214           0 :         g.ops = append(g.ops, op)
     215           0 :         g.keyManager.update(op)
     216           0 : }
     217             : 
     218             : // randKeyToWrite returns a key for any write other than SingleDelete.
     219             : //
     220             : // TODO(peter): make the size and distribution of keys configurable. See
     221             : // keyDist and keySizeDist in config.go.
     222           0 : func (g *generator) randKeyToWrite(newKey float64) []byte {
     223           0 :         return g.randKeyHelper(g.keyManager.knownKeys(), newKey, nil)
     224           0 : }
     225             : 
     226             : // prefixKeyRange generates a [start, end) pair consisting of two prefix keys.
     227           0 : func (g *generator) prefixKeyRange() ([]byte, []byte) {
     228           0 :         start := g.randPrefixToWrite(0.001)
     229           0 :         end := g.randPrefixToWrite(0.001)
     230           0 :         for g.cmp(start, end) == 0 {
     231           0 :                 end = g.randPrefixToWrite(0.05)
     232           0 :         }
     233           0 :         if g.cmp(start, end) > 0 {
     234           0 :                 start, end = end, start
     235           0 :         }
     236           0 :         return start, end
     237             : }
     238             : 
     239             : // randPrefixToWrite returns a prefix key (a key with no suffix) for a range key
     240             : // write operation.
     241           0 : func (g *generator) randPrefixToWrite(newPrefix float64) []byte {
     242           0 :         prefixes := g.keyManager.prefixes()
     243           0 :         if len(prefixes) > 0 && g.rng.Float64() > newPrefix {
     244           0 :                 // Use an existing prefix.
     245           0 :                 p := g.rng.Intn(len(prefixes))
     246           0 :                 return prefixes[p]
     247           0 :         }
     248             : 
     249             :         // Use a new prefix.
     250           0 :         var prefix []byte
     251           0 :         for {
     252           0 :                 prefix = g.randKeyHelperSuffix(nil, 4, 12, 0)
     253           0 :                 if !g.keyManager.prefixExists(prefix) {
     254           0 :                         if !g.keyManager.addNewKey(prefix) {
     255           0 :                                 panic("key must not exist if prefix doesn't exist")
     256             :                         }
     257           0 :                         return prefix
     258             :                 }
     259             :         }
     260             : }
     261             : 
     262             : // randSuffixToWrite generates a random suffix according to the configuration's suffix
     263             : // distribution. It takes a probability 0 ≤ p ≤ 1.0 indicating the probability
     264             : // with which the generator should increase the max suffix generated by the
     265             : // generator.
     266             : //
     267             : // randSuffixToWrite may return a nil suffix, with the probability the
     268             : // configuration's suffix distribution assigns to the zero suffix.
     269           0 : func (g *generator) randSuffixToWrite(incMaxProb float64) []byte {
     270           0 :         if g.rng.Float64() < incMaxProb {
     271           0 :                 g.cfg.writeSuffixDist.IncMax(1)
     272           0 :         }
     273           0 :         return suffixFromInt(int64(g.cfg.writeSuffixDist.Uint64(g.rng)))
     274             : }
     275             : 
     276             : // randSuffixToRead generates a random suffix used during reads. The suffixes
     277             : // generated by this function are within the same range as suffixes generated by
     278             : // randSuffixToWrite, however randSuffixToRead pulls from a uniform
     279             : // distribution.
     280           0 : func (g *generator) randSuffixToRead() []byte {
     281           0 :         // When reading, don't apply the recency skewing in order to better exercise
      282           0 :         // reading a mix of older and newer keys.
     283           0 :         max := g.cfg.writeSuffixDist.Max()
     284           0 :         return suffixFromInt(g.rng.Int63n(int64(max)))
     285           0 : }
     286             : 
     287           0 : func suffixFromInt(suffix int64) []byte {
     288           0 :         // Treat the zero as no suffix to match the behavior during point key
     289           0 :         // generation in randKeyHelper.
     290           0 :         if suffix == 0 {
     291           0 :                 return nil
     292           0 :         }
     293           0 :         return testkeys.Suffix(suffix)
     294             : }
     295             : 
     296           0 : func (g *generator) randKeyToSingleDelete(id objID) []byte {
     297           0 :         keys := g.keyManager.eligibleSingleDeleteKeys(id)
     298           0 :         length := len(keys)
     299           0 :         if length == 0 {
     300           0 :                 return nil
     301           0 :         }
     302           0 :         return keys[g.rng.Intn(length)]
     303             : }
     304             : 
     305             : // randKeyToRead returns a key for read operations.
     306           0 : func (g *generator) randKeyToRead(newKey float64) []byte {
     307           0 :         return g.randKeyHelper(g.keyManager.knownKeys(), newKey, nil)
     308           0 : }
     309             : 
     310             : // randKeyToReadInRange returns a key for read operations within the provided
     311             : // key range. The bounds of the provided key range must span a prefix boundary.
     312           0 : func (g *generator) randKeyToReadInRange(newKey float64, kr pebble.KeyRange) []byte {
     313           0 :         return g.randKeyHelper(g.keyManager.knownKeysInRange(kr), newKey, &kr)
     314           0 : }
     315             : 
     316             : func (g *generator) randKeyHelper(
     317             :         keys [][]byte, newKey float64, newKeyBounds *pebble.KeyRange,
     318           0 : ) []byte {
     319           0 :         switch {
     320           0 :         case len(keys) > 0 && g.rng.Float64() > newKey:
     321           0 :                 // Use an existing user key.
     322           0 :                 return keys[g.rng.Intn(len(keys))]
     323             : 
     324           0 :         case len(keys) > 0 && g.rng.Float64() > g.cfg.newPrefix:
     325           0 :                 // Use an existing prefix but a new suffix, producing a new user key.
     326           0 :                 prefixes := g.keyManager.prefixes()
     327           0 : 
     328           0 :                 // If we're constrained to a key range, find which existing prefixes
     329           0 :                 // fall within that key range.
     330           0 :                 if newKeyBounds != nil {
     331           0 :                         s, _ := slices.BinarySearchFunc(prefixes, newKeyBounds.Start, g.cmp)
     332           0 :                         e, _ := slices.BinarySearchFunc(prefixes, newKeyBounds.End, g.cmp)
     333           0 :                         prefixes = prefixes[s:e]
     334           0 :                 }
     335             : 
     336           0 :                 if len(prefixes) > 0 {
     337           0 :                         for {
     338           0 :                                 // Pick a prefix on each iteration in case most or all suffixes are
     339           0 :                                 // already in use for any individual prefix.
     340           0 :                                 p := g.rng.Intn(len(prefixes))
     341           0 :                                 suffix := int64(g.cfg.writeSuffixDist.Uint64(g.rng))
     342           0 : 
     343           0 :                                 var key []byte
     344           0 :                                 if suffix > 0 {
     345           0 :                                         key = resizeBuffer(key, len(prefixes[p]), testkeys.SuffixLen(suffix))
     346           0 :                                         n := copy(key, prefixes[p])
     347           0 :                                         testkeys.WriteSuffix(key[n:], suffix)
     348           0 :                                 } else {
     349           0 :                                         key = resizeBuffer(key, len(prefixes[p]), 0)
     350           0 :                                         copy(key, prefixes[p])
     351           0 :                                 }
     352             : 
     353           0 :                                 if (newKeyBounds == nil || (g.cmp(key, newKeyBounds.Start) >= 0 && g.cmp(key, newKeyBounds.End) < 0)) &&
     354           0 :                                         g.keyManager.addNewKey(key) {
     355           0 :                                         return key
     356           0 :                                 }
     357             : 
     358             :                                 // If the generated key already existed, or the generated key
     359             :                                 // fell outside the provided bounds, increase the suffix
     360             :                                 // distribution and loop.
     361           0 :                                 g.cfg.writeSuffixDist.IncMax(1)
     362             :                         }
     363             :                 }
     364             :                 // Otherwise fall through to generating a new prefix.
     365           0 :                 fallthrough
     366             : 
     367           0 :         default:
     368           0 :                 // Use a new prefix, producing a new user key.
     369           0 : 
     370           0 :                 var key []byte
     371           0 : 
     372           0 :                 suffix := int64(g.cfg.writeSuffixDist.Uint64(g.rng))
     373           0 : 
     374           0 :                 // If we have bounds in which we need to generate the key, use
     375           0 :                 // testkeys.RandomSeparator to generate a key between the bounds.
     376           0 :                 if newKeyBounds != nil {
     377           0 :                         targetLength := 4 + g.rng.Intn(8)
     378           0 :                         key = testkeys.RandomSeparator(nil, g.prefix(newKeyBounds.Start), g.prefix(newKeyBounds.End),
     379           0 :                                 suffix, targetLength, g.rng)
     380           0 :                 } else {
     381           0 :                         for {
     382           0 :                                 key = g.randKeyHelperSuffix(nil, 4, 12, suffix)
     383           0 :                                 if !g.keyManager.prefixExists(key[:testkeys.Comparer.Split(key)]) {
     384           0 :                                         if !g.keyManager.addNewKey(key) {
     385           0 :                                                 panic("key must not exist if prefix doesn't exist")
     386             :                                         }
     387           0 :                                         break
     388             :                                 }
     389             :                         }
     390             :                 }
     391           0 :                 return key
     392             :         }
     393             : }
     394             : 
     395             : // randKeyHelperSuffix is a helper function for randKeyHelper, and should not be
     396             : // invoked directly.
     397             : func (g *generator) randKeyHelperSuffix(
     398             :         dst []byte, minPrefixLen, maxPrefixLen int, suffix int64,
     399           0 : ) []byte {
     400           0 :         n := minPrefixLen
     401           0 :         if maxPrefixLen > minPrefixLen {
     402           0 :                 n += g.rng.Intn(maxPrefixLen - minPrefixLen)
     403           0 :         }
     404             :         // In order to test a mix of suffixed and unsuffixed keys, omit the zero
     405             :         // suffix.
     406           0 :         if suffix == 0 {
     407           0 :                 dst = resizeBuffer(dst, n, 0)
     408           0 :                 g.fillRand(dst)
     409           0 :                 return dst
     410           0 :         }
     411           0 :         suffixLen := testkeys.SuffixLen(suffix)
     412           0 :         dst = resizeBuffer(dst, n, suffixLen)
     413           0 :         g.fillRand(dst[:n])
     414           0 :         testkeys.WriteSuffix(dst[n:], suffix)
     415           0 :         return dst
     416             : }
     417             : 
     418           0 : func resizeBuffer(buf []byte, prefixLen, suffixLen int) []byte {
     419           0 :         if cap(buf) >= prefixLen+suffixLen {
     420           0 :                 return buf[:prefixLen+suffixLen]
     421           0 :         }
     422           0 :         return make([]byte, prefixLen+suffixLen)
     423             : }
     424             : 
     425             : // TODO(peter): make the value size configurable. See valueSizeDist in
     426             : // config.go.
     427           0 : func (g *generator) randValue(min, max int) []byte {
     428           0 :         n := min
     429           0 :         if max > min {
     430           0 :                 n += g.rng.Intn(max - min)
     431           0 :         }
     432           0 :         if n == 0 {
     433           0 :                 return nil
     434           0 :         }
     435           0 :         buf := make([]byte, n)
     436           0 :         g.fillRand(buf)
     437           0 :         return buf
     438             : }
     439             : 
     440           0 : func (g *generator) fillRand(buf []byte) {
     441           0 :         // NB: The actual random values are not particularly important. We only use
     442           0 :         // lowercase letters because that makes visual determination of ordering
     443           0 :         // easier, rather than having to remember the lexicographic ordering of
     444           0 :         // uppercase vs lowercase, or letters vs numbers vs punctuation.
     445           0 :         const letters = "abcdefghijklmnopqrstuvwxyz"
     446           0 :         const lettersLen = uint64(len(letters))
     447           0 :         const lettersCharsPerRand = 12 // floor(log(math.MaxUint64)/log(lettersLen))
     448           0 : 
     449           0 :         var r uint64
     450           0 :         var q int
     451           0 :         for i := 0; i < len(buf); i++ {
     452           0 :                 if q == 0 {
     453           0 :                         r = g.rng.Uint64()
     454           0 :                         q = lettersCharsPerRand
     455           0 :                 }
     456           0 :                 buf[i] = letters[r%lettersLen]
     457           0 :                 r = r / lettersLen
     458           0 :                 q--
     459             :         }
     460             : }
     461             : 
     462           0 : func (g *generator) newBatch() {
     463           0 :         batchID := makeObjID(batchTag, g.init.batchSlots)
     464           0 :         g.init.batchSlots++
     465           0 :         g.liveBatches = append(g.liveBatches, batchID)
     466           0 :         g.liveWriters = append(g.liveWriters, batchID)
     467           0 :         dbID := g.dbs.rand(g.rng)
     468           0 :         g.objDB[batchID] = dbID
     469           0 : 
     470           0 :         g.add(&newBatchOp{
     471           0 :                 dbID:    dbID,
     472           0 :                 batchID: batchID,
     473           0 :         })
     474           0 : }
     475             : 
     476           0 : func (g *generator) newIndexedBatch() {
     477           0 :         batchID := makeObjID(batchTag, g.init.batchSlots)
     478           0 :         g.init.batchSlots++
     479           0 :         g.liveBatches = append(g.liveBatches, batchID)
     480           0 :         g.liveReaders = append(g.liveReaders, batchID)
     481           0 :         g.liveWriters = append(g.liveWriters, batchID)
     482           0 : 
     483           0 :         iters := make(objIDSet)
     484           0 :         g.batches[batchID] = iters
     485           0 :         g.readers[batchID] = iters
     486           0 :         dbID := g.dbs.rand(g.rng)
     487           0 :         g.objDB[batchID] = dbID
     488           0 : 
     489           0 :         g.add(&newIndexedBatchOp{
     490           0 :                 dbID:    dbID,
     491           0 :                 batchID: batchID,
     492           0 :         })
     493           0 : }
     494             : 
// removeBatchFromGenerator removes all generator state tracking the given
// batch. It will not generate a closeOp for the target batch as not every
// batch that is removed from the generator should be closed. For example,
// running a closeOp before an ingestOp that contains the closed batch will
// cause an error.
func (g *generator) removeBatchFromGenerator(batchID objID) {
	g.liveBatches.remove(batchID)
	iters := g.batches[batchID]
	delete(g.batches, batchID)

	// A non-nil iterator set means this was an indexed batch, which is also
	// tracked as a reader.
	if iters != nil {
		g.liveReaders.remove(batchID)
		delete(g.readers, batchID)
	}
	g.liveWriters.remove(batchID)
	// Close any iterators opened on the batch; iterating in sorted order
	// keeps op generation deterministic. (iters.sorted() on a nil set yields
	// nothing.)
	for _, id := range iters.sorted() {
		g.liveIters.remove(id)
		delete(g.iters, id)
		g.add(&closeOp{objID: id})
	}
}
     515             : 
     516           0 : func (g *generator) batchAbort() {
     517           0 :         if len(g.liveBatches) == 0 {
     518           0 :                 return
     519           0 :         }
     520             : 
     521           0 :         batchID := g.liveBatches.rand(g.rng)
     522           0 :         g.removeBatchFromGenerator(batchID)
     523           0 : 
     524           0 :         g.add(&closeOp{objID: batchID})
     525             : }
     526             : 
     527           0 : func (g *generator) batchCommit() {
     528           0 :         if len(g.liveBatches) == 0 {
     529           0 :                 return
     530           0 :         }
     531             : 
     532           0 :         batchID := g.liveBatches.rand(g.rng)
     533           0 :         dbID := g.objDB[batchID]
     534           0 :         g.removeBatchFromGenerator(batchID)
     535           0 : 
     536           0 :         // The batch we're applying may contain single delete tombstones that when
     537           0 :         // applied to the writer result in nondeterminism in the deleted key. If
     538           0 :         // that's the case, we can restore determinism by first deleting the key
     539           0 :         // from the writer.
     540           0 :         //
     541           0 :         // Generating additional operations here is not ideal, but it simplifies
     542           0 :         // single delete invariants significantly.
     543           0 :         singleDeleteConflicts := g.keyManager.checkForSingleDelConflicts(batchID, dbID, false /* collapsed */)
     544           0 :         for _, conflict := range singleDeleteConflicts {
     545           0 :                 g.add(&deleteOp{
     546           0 :                         writerID:    dbID,
     547           0 :                         key:         conflict,
     548           0 :                         derivedDBID: dbID,
     549           0 :                 })
     550           0 :         }
     551             : 
     552           0 :         g.add(&batchCommitOp{
     553           0 :                 dbID:    dbID,
     554           0 :                 batchID: batchID,
     555           0 :         })
     556           0 :         g.add(&closeOp{objID: batchID})
     557             : 
     558             : }
     559             : 
     560           0 : func (g *generator) dbClose() {
     561           0 :         // Close any live iterators and snapshots, so that we can close the DB
     562           0 :         // cleanly.
     563           0 :         for len(g.liveIters) > 0 {
     564           0 :                 g.randIter(g.iterClose)()
     565           0 :         }
     566           0 :         for len(g.liveSnapshots) > 0 {
     567           0 :                 g.snapshotClose()
     568           0 :         }
     569           0 :         for len(g.liveBatches) > 0 {
     570           0 :                 batchID := g.liveBatches[0]
     571           0 :                 g.removeBatchFromGenerator(batchID)
     572           0 :                 g.add(&closeOp{objID: batchID})
     573           0 :         }
     574           0 :         for len(g.dbs) > 0 {
     575           0 :                 db := g.dbs[0]
     576           0 :                 g.dbs = g.dbs[1:]
     577           0 :                 g.add(&closeOp{objID: db})
     578           0 :         }
     579             : }
     580             : 
     581           0 : func (g *generator) dbCheckpoint() {
     582           0 :         // 1/2 of the time we don't restrict the checkpoint;
     583           0 :         // 1/4 of the time we restrict to 1 span;
     584           0 :         // 1/8 of the time we restrict to 2 spans; etc.
     585           0 :         numSpans := 0
     586           0 :         var spans []pebble.CheckpointSpan
     587           0 :         for g.rng.Intn(2) == 0 {
     588           0 :                 numSpans++
     589           0 :         }
     590           0 :         if numSpans > 0 {
     591           0 :                 spans = make([]pebble.CheckpointSpan, numSpans)
     592           0 :         }
     593           0 :         for i := range spans {
     594           0 :                 start := g.randKeyToRead(0.01)
     595           0 :                 end := g.randKeyToRead(0.01)
     596           0 :                 if g.cmp(start, end) > 0 {
     597           0 :                         start, end = end, start
     598           0 :                 }
     599           0 :                 spans[i].Start = start
     600           0 :                 spans[i].End = end
     601             :         }
     602           0 :         dbID := g.dbs.rand(g.rng)
     603           0 :         g.add(&checkpointOp{
     604           0 :                 dbID:  dbID,
     605           0 :                 spans: spans,
     606           0 :         })
     607             : }
     608             : 
     609           0 : func (g *generator) dbCompact() {
     610           0 :         // Generate new key(s) with a 1% probability.
     611           0 :         start := g.randKeyToRead(0.01)
     612           0 :         end := g.randKeyToRead(0.01)
     613           0 :         if g.cmp(start, end) > 0 {
     614           0 :                 start, end = end, start
     615           0 :         }
     616           0 :         dbID := g.dbs.rand(g.rng)
     617           0 :         g.add(&compactOp{
     618           0 :                 dbID:        dbID,
     619           0 :                 start:       start,
     620           0 :                 end:         end,
     621           0 :                 parallelize: g.rng.Float64() < 0.5,
     622           0 :         })
     623             : }
     624             : 
     625           0 : func (g *generator) dbFlush() {
     626           0 :         g.add(&flushOp{g.dbs.rand(g.rng)})
     627           0 : }
     628             : 
     629           0 : func (g *generator) dbRatchetFormatMajorVersion() {
     630           0 :         // Ratchet to a random format major version between the minimum the
     631           0 :         // metamorphic tests support and the newest. At runtime, the generated
     632           0 :         // version may be behind the database's format major version, in which case
     633           0 :         // RatchetFormatMajorVersion should deterministically error.
     634           0 : 
     635           0 :         dbID := g.dbs.rand(g.rng)
     636           0 :         n := int(newestFormatMajorVersionToTest - minimumFormatMajorVersion)
     637           0 :         vers := pebble.FormatMajorVersion(g.rng.Intn(n+1)) + minimumFormatMajorVersion
     638           0 :         g.add(&dbRatchetFormatMajorVersionOp{dbID: dbID, vers: vers})
     639           0 : }
     640             : 
     641           0 : func (g *generator) dbRestart() {
     642           0 :         // Close any live iterators and snapshots, so that we can close the DB
     643           0 :         // cleanly.
     644           0 :         dbID := g.dbs.rand(g.rng)
     645           0 :         for len(g.liveIters) > 0 {
     646           0 :                 g.randIter(g.iterClose)()
     647           0 :         }
     648           0 :         for len(g.liveSnapshots) > 0 {
     649           0 :                 g.snapshotClose()
     650           0 :         }
     651             :         // Close the batches.
     652           0 :         for len(g.liveBatches) > 0 {
     653           0 :                 batchID := g.liveBatches[0]
     654           0 :                 g.removeBatchFromGenerator(batchID)
     655           0 :                 g.add(&closeOp{objID: batchID})
     656           0 :         }
     657           0 :         if len(g.liveReaders) != len(g.dbs) || len(g.liveWriters) != len(g.dbs) {
     658           0 :                 panic(fmt.Sprintf("unexpected counts: liveReaders %d, liveWriters: %d",
     659           0 :                         len(g.liveReaders), len(g.liveWriters)))
     660             :         }
     661           0 :         g.add(&dbRestartOp{dbID: dbID})
     662             : }
     663             : 
     664             : // maybeSetSnapshotIterBounds must be called whenever creating a new iterator or
     665             : // modifying the bounds of an iterator. If the iterator is backed by a snapshot
     666             : // that only guarantees consistency within a limited set of key spans, then the
     667             : // iterator must set bounds within one of the snapshot's consistent keyspans. It
     668             : // returns true if the provided readerID is a bounded snapshot and bounds were
     669             : // set.
     670           0 : func (g *generator) maybeSetSnapshotIterBounds(readerID objID, opts *iterOpts) bool {
     671           0 :         snapBounds, isBoundedSnapshot := g.snapshotBounds[readerID]
     672           0 :         if !isBoundedSnapshot {
     673           0 :                 return false
     674           0 :         }
     675             :         // Pick a random keyrange within one of the snapshot's key ranges.
     676           0 :         parentBounds := snapBounds[g.rng.Intn(len(snapBounds))]
     677           0 :         // With 10% probability, use the parent start bound as-is.
     678           0 :         if g.rng.Float64() <= 0.1 {
     679           0 :                 opts.lower = parentBounds.Start
     680           0 :         } else {
     681           0 :                 opts.lower = testkeys.RandomSeparator(
     682           0 :                         nil, /* dst */
     683           0 :                         parentBounds.Start,
     684           0 :                         parentBounds.End,
     685           0 :                         0, /* suffix */
     686           0 :                         4+g.rng.Intn(8),
     687           0 :                         g.rng,
     688           0 :                 )
     689           0 :         }
     690             :         // With 10% probability, use the parent end bound as-is.
     691           0 :         if g.rng.Float64() <= 0.1 {
     692           0 :                 opts.upper = parentBounds.End
     693           0 :         } else {
     694           0 :                 opts.upper = testkeys.RandomSeparator(
     695           0 :                         nil, /* dst */
     696           0 :                         opts.lower,
     697           0 :                         parentBounds.End,
     698           0 :                         0, /* suffix */
     699           0 :                         4+g.rng.Intn(8),
     700           0 :                         g.rng,
     701           0 :                 )
     702           0 :         }
     703           0 :         return true
     704             : }
     705             : 
     706           0 : func (g *generator) newIter() {
     707           0 :         iterID := makeObjID(iterTag, g.init.iterSlots)
     708           0 :         g.init.iterSlots++
     709           0 :         g.liveIters = append(g.liveIters, iterID)
     710           0 : 
     711           0 :         readerID := g.liveReaders.rand(g.rng)
     712           0 :         if iters := g.readers[readerID]; iters != nil {
     713           0 :                 iters[iterID] = struct{}{}
     714           0 :                 g.iters[iterID] = iters
     715           0 :                 //lint:ignore SA9003 - readability
     716           0 :         } else {
     717           0 :                 // NB: the DB object does not track its open iterators because it never
     718           0 :                 // closes.
     719           0 :         }
     720           0 :         g.iterReaderID[iterID] = readerID
     721           0 :         dbID := g.deriveDB(iterID)
     722           0 : 
     723           0 :         var opts iterOpts
     724           0 :         if !g.maybeSetSnapshotIterBounds(readerID, &opts) {
     725           0 :                 // Generate lower/upper bounds with a 10% probability.
     726           0 :                 if g.rng.Float64() <= 0.1 {
     727           0 :                         // Generate a new key with a .1% probability.
     728           0 :                         opts.lower = g.randKeyToRead(0.001)
     729           0 :                 }
     730           0 :                 if g.rng.Float64() <= 0.1 {
     731           0 :                         // Generate a new key with a .1% probability.
     732           0 :                         opts.upper = g.randKeyToRead(0.001)
     733           0 :                 }
     734           0 :                 if g.cmp(opts.lower, opts.upper) > 0 {
     735           0 :                         opts.lower, opts.upper = opts.upper, opts.lower
     736           0 :                 }
     737             :         }
     738           0 :         opts.keyTypes, opts.maskSuffix = g.randKeyTypesAndMask()
     739           0 : 
     740           0 :         // With 10% probability, enable automatic filtering of keys with suffixes
     741           0 :         // not in the provided range. This filtering occurs both through
     742           0 :         // block-property filtering and explicitly within the iterator operations to
     743           0 :         // ensure determinism.
     744           0 :         if g.rng.Float64() <= 0.1 {
     745           0 :                 max := g.cfg.writeSuffixDist.Max()
     746           0 :                 opts.filterMin, opts.filterMax = g.rng.Uint64n(max)+1, g.rng.Uint64n(max)+1
     747           0 :                 if opts.filterMin > opts.filterMax {
     748           0 :                         opts.filterMin, opts.filterMax = opts.filterMax, opts.filterMin
     749           0 :                 } else if opts.filterMin == opts.filterMax {
     750           0 :                         opts.filterMax = opts.filterMin + 1
     751           0 :                 }
     752             :         }
     753             : 
     754             :         // Enable L6 filters with a 10% probability.
     755           0 :         if g.rng.Float64() <= 0.1 {
     756           0 :                 opts.useL6Filters = true
     757           0 :         }
     758             : 
     759           0 :         g.itersLastOpts[iterID] = opts
     760           0 :         g.iterCreationTimestamp[iterID] = g.keyManager.nextMetaTimestamp()
     761           0 :         g.iterReaderID[iterID] = readerID
     762           0 :         g.add(&newIterOp{
     763           0 :                 readerID:    readerID,
     764           0 :                 iterID:      iterID,
     765           0 :                 iterOpts:    opts,
     766           0 :                 derivedDBID: dbID,
     767           0 :         })
     768             : }
     769             : 
     770           0 : func (g *generator) randKeyTypesAndMask() (keyTypes uint32, maskSuffix []byte) {
     771           0 :         // Iterate over different key types.
     772           0 :         p := g.rng.Float64()
     773           0 :         switch {
     774           0 :         case p < 0.2: // 20% probability
     775           0 :                 keyTypes = uint32(pebble.IterKeyTypePointsOnly)
     776           0 :         case p < 0.8: // 60% probability
     777           0 :                 keyTypes = uint32(pebble.IterKeyTypePointsAndRanges)
     778           0 :                 // With 50% probability, enable masking.
     779           0 :                 if g.rng.Intn(2) == 1 {
     780           0 :                         maskSuffix = g.randSuffixToRead()
     781           0 :                 }
     782           0 :         default: // 20% probability
     783           0 :                 keyTypes = uint32(pebble.IterKeyTypeRangesOnly)
     784             :         }
     785           0 :         return keyTypes, maskSuffix
     786             : }
     787             : 
     788           0 : func (g *generator) deriveDB(readerID objID) objID {
     789           0 :         if readerID.tag() == iterTag {
     790           0 :                 readerID = g.iterReaderID[readerID]
     791           0 :         }
     792           0 :         dbParentID := readerID
     793           0 :         if dbParentID.tag() != dbTag {
     794           0 :                 dbParentID = g.objDB[dbParentID]
     795           0 :         }
     796           0 :         g.objDB[readerID] = dbParentID
     797           0 :         return dbParentID
     798             : }
     799             : 
     800           0 : func (g *generator) newIterUsingClone() {
     801           0 :         if len(g.liveIters) == 0 {
     802           0 :                 return
     803           0 :         }
     804           0 :         existingIterID := g.liveIters.rand(g.rng)
     805           0 :         iterID := makeObjID(iterTag, g.init.iterSlots)
     806           0 :         g.init.iterSlots++
     807           0 :         g.liveIters = append(g.liveIters, iterID)
     808           0 :         if iters := g.iters[existingIterID]; iters != nil {
     809           0 :                 iters[iterID] = struct{}{}
     810           0 :                 g.iters[iterID] = iters
     811           0 :                 //lint:ignore SA9003 - readability
     812           0 :         } else {
     813           0 :                 // NB: the DB object does not track its open iterators because it never
     814           0 :                 // closes.
     815           0 :         }
     816           0 :         readerID := g.iterReaderID[existingIterID]
     817           0 :         g.iterReaderID[iterID] = readerID
     818           0 :         g.deriveDB(iterID)
     819           0 : 
     820           0 :         var refreshBatch bool
     821           0 :         if readerID.tag() == batchTag {
     822           0 :                 refreshBatch = g.rng.Intn(2) == 1
     823           0 :         }
     824             : 
     825           0 :         opts := g.itersLastOpts[existingIterID]
     826           0 :         // With 50% probability, consider modifying the iterator options used by the
     827           0 :         // clone.
     828           0 :         if g.rng.Intn(2) == 1 {
     829           0 :                 g.maybeMutateOptions(readerID, &opts)
     830           0 :         }
     831           0 :         g.itersLastOpts[iterID] = opts
     832           0 : 
     833           0 :         g.iterCreationTimestamp[iterID] = g.keyManager.nextMetaTimestamp()
     834           0 :         g.iterReaderID[iterID] = g.iterReaderID[existingIterID]
     835           0 :         g.add(&newIterUsingCloneOp{
     836           0 :                 existingIterID:  existingIterID,
     837           0 :                 iterID:          iterID,
     838           0 :                 refreshBatch:    refreshBatch,
     839           0 :                 iterOpts:        opts,
     840           0 :                 derivedReaderID: readerID,
     841           0 :         })
     842             : }
     843             : 
     844           0 : func (g *generator) iterClose(iterID objID) {
     845           0 :         g.liveIters.remove(iterID)
     846           0 :         if readerIters, ok := g.iters[iterID]; ok {
     847           0 :                 delete(g.iters, iterID)
     848           0 :                 delete(readerIters, iterID)
     849           0 :         }
     850             : 
     851           0 :         g.add(&closeOp{objID: iterID})
     852             : }
     853             : 
// iterSetBounds emits an iterSetBoundsOp that changes the iterator's bounds,
// then exercises the iterator near the new bounds with a seek and a few
// Next/Prev steps.
func (g *generator) iterSetBounds(iterID objID) {
	iterLastOpts := g.itersLastOpts[iterID]
	newOpts := iterLastOpts
	// TODO(jackson): The logic to increase the probability of advancing bounds
	// monotonically only applies if the snapshot is not bounded. Refactor to
	// allow bounded snapshots to benefit too, when possible.
	if !g.maybeSetSnapshotIterBounds(g.iterReaderID[iterID], &newOpts) {
		var lower, upper []byte
		// Generate each bound 90% of the time.
		genLower := g.rng.Float64() <= 0.9
		genUpper := g.rng.Float64() <= 0.9
		// When one of ensureLowerGE, ensureUpperLE is true, the new bounds
		// don't overlap with the previous bounds.
		var ensureLowerGE, ensureUpperLE bool
		if genLower && iterLastOpts.upper != nil && g.rng.Float64() <= 0.9 {
			ensureLowerGE = true
		}
		if (!ensureLowerGE || g.rng.Float64() < 0.5) && genUpper && iterLastOpts.lower != nil {
			ensureUpperLE = true
			ensureLowerGE = false
		}
		// Keep drawing candidate bounds until the non-overlap requirement (if
		// any) is satisfied; after 25 attempts, give up and degenerate to an
		// empty range anchored at the old bound.
		attempts := 0
		for {
			attempts++
			if genLower {
				// Generate a new key with a .1% probability.
				lower = g.randKeyToRead(0.001)
			}
			if genUpper {
				// Generate a new key with a .1% probability.
				upper = g.randKeyToRead(0.001)
			}
			if g.cmp(lower, upper) > 0 {
				lower, upper = upper, lower
			}
			if ensureLowerGE && g.cmp(iterLastOpts.upper, lower) > 0 {
				if attempts < 25 {
					continue
				}
				// Fallback: empty range starting at the previous upper bound.
				lower = iterLastOpts.upper
				upper = lower
				break
			}
			if ensureUpperLE && g.cmp(upper, iterLastOpts.lower) > 0 {
				if attempts < 25 {
					continue
				}
				// Fallback: empty range ending at the previous lower bound.
				upper = iterLastOpts.lower
				lower = upper
				break
			}
			break
		}
		newOpts.lower = lower
		newOpts.upper = upper
	}
	g.itersLastOpts[iterID] = newOpts
	g.add(&iterSetBoundsOp{
		iterID: iterID,
		lower:  newOpts.lower,
		upper:  newOpts.upper,
	})
	// Additionally seek the iterator in a manner consistent with the bounds,
	// and do some steps (Next/Prev). The seeking exercises typical
	// CockroachDB behavior when using iterators and the steps are trying to
	// stress the region near the bounds. Ideally, we should not do this as
	// part of generating a single op, but this is easier than trying to
	// control future op generation via generator state.
	doSeekLT := newOpts.upper != nil && g.rng.Float64() < 0.5
	doSeekGE := newOpts.lower != nil && g.rng.Float64() < 0.5
	if doSeekLT && doSeekGE {
		// Pick the seek.
		if g.rng.Float64() < 0.5 {
			doSeekGE = false
		} else {
			doSeekLT = false
		}
	}
	if doSeekLT {
		// Seek to the upper bound, then randomly step around it.
		g.add(&iterSeekLTOp{
			iterID:          iterID,
			key:             newOpts.upper,
			derivedReaderID: g.iterReaderID[iterID],
		})
		if g.rng.Float64() < 0.5 {
			g.iterNext(iterID)
		}
		if g.rng.Float64() < 0.5 {
			g.iterNext(iterID)
		}
		if g.rng.Float64() < 0.5 {
			g.iterPrev(iterID)
		}
	} else if doSeekGE {
		// Seek to the lower bound, then randomly step around it.
		g.add(&iterSeekGEOp{
			iterID:          iterID,
			key:             newOpts.lower,
			derivedReaderID: g.iterReaderID[iterID],
		})
		if g.rng.Float64() < 0.5 {
			g.iterPrev(iterID)
		}
		if g.rng.Float64() < 0.5 {
			g.iterPrev(iterID)
		}
		if g.rng.Float64() < 0.5 {
			g.iterNext(iterID)
		}
	}
}
     963             : 
     964           0 : func (g *generator) iterSetOptions(iterID objID) {
     965           0 :         opts := g.itersLastOpts[iterID]
     966           0 :         g.maybeMutateOptions(g.iterReaderID[iterID], &opts)
     967           0 :         g.itersLastOpts[iterID] = opts
     968           0 :         g.add(&iterSetOptionsOp{
     969           0 :                 iterID:          iterID,
     970           0 :                 iterOpts:        opts,
     971           0 :                 derivedReaderID: g.iterReaderID[iterID],
     972           0 :         })
     973           0 : 
     974           0 :         // Additionally, perform a random absolute positioning operation. The
     975           0 :         // SetOptions contract requires one before the next relative positioning
     976           0 :         // operation. Ideally, we should not do this as part of generating a single
     977           0 :         // op, but this is easier than trying to control future op generation via
     978           0 :         // generator state.
     979           0 :         g.pickOneUniform(
     980           0 :                 g.iterFirst,
     981           0 :                 g.iterLast,
     982           0 :                 g.iterSeekGE,
     983           0 :                 g.iterSeekGEWithLimit,
     984           0 :                 g.iterSeekPrefixGE,
     985           0 :                 g.iterSeekLT,
     986           0 :                 g.iterSeekLTWithLimit,
     987           0 :         )(iterID)
     988           0 : }
     989             : 
     990           0 : func (g *generator) iterSeekGE(iterID objID) {
     991           0 :         g.add(&iterSeekGEOp{
     992           0 :                 iterID:          iterID,
     993           0 :                 key:             g.randKeyToRead(0.001), // 0.1% new keys
     994           0 :                 derivedReaderID: g.iterReaderID[iterID],
     995           0 :         })
     996           0 : }
     997             : 
     998           0 : func (g *generator) iterSeekGEWithLimit(iterID objID) {
     999           0 :         // 0.1% new keys
    1000           0 :         key, limit := g.randKeyToRead(0.001), g.randKeyToRead(0.001)
    1001           0 :         if g.cmp(key, limit) > 0 {
    1002           0 :                 key, limit = limit, key
    1003           0 :         }
    1004           0 :         g.add(&iterSeekGEOp{
    1005           0 :                 iterID:          iterID,
    1006           0 :                 key:             key,
    1007           0 :                 limit:           limit,
    1008           0 :                 derivedReaderID: g.iterReaderID[iterID],
    1009           0 :         })
    1010             : }
    1011             : 
    1012           0 : func (g *generator) randKeyToReadWithinBounds(lower, upper []byte, readerID objID) []*keyMeta {
    1013           0 :         var inRangeKeys []*keyMeta
    1014           0 :         for _, keyMeta := range g.keyManager.byObj[readerID] {
    1015           0 :                 posKey := keyMeta.key
    1016           0 :                 if g.cmp(posKey, lower) < 0 || g.cmp(posKey, upper) >= 0 {
    1017           0 :                         continue
    1018             :                 }
    1019           0 :                 inRangeKeys = append(inRangeKeys, keyMeta)
    1020             :         }
    1021           0 :         return inRangeKeys
    1022             : }
    1023             : 
    1024           0 : func (g *generator) iterSeekPrefixGE(iterID objID) {
    1025           0 :         lower := g.itersLastOpts[iterID].lower
    1026           0 :         upper := g.itersLastOpts[iterID].upper
    1027           0 :         iterCreationTimestamp := g.iterCreationTimestamp[iterID]
    1028           0 :         var key []byte
    1029           0 : 
    1030           0 :         // We try to make sure that the SeekPrefixGE key is within the iter bounds,
    1031           0 :         // and that the iter can read the key. If the key was created on a batch
    1032           0 :         // which deleted the key, then the key will still be considered visible
    1033           0 :         // by the current logic. We're also not accounting for keys written to
    1034           0 :         // batches which haven't been presisted to the DB. But we're only picking
    1035           0 :         // keys in a best effort manner, and the logic is better than picking a
    1036           0 :         // random key.
    1037           0 :         if g.rng.Intn(10) >= 1 {
    1038           0 :                 possibleKeys := make([][]byte, 0, 100)
    1039           0 :                 inRangeKeys := g.randKeyToReadWithinBounds(lower, upper, g.objDB[iterID])
    1040           0 :                 for _, keyMeta := range inRangeKeys {
    1041           0 :                         visibleHistory := keyMeta.history.before(iterCreationTimestamp)
    1042           0 : 
    1043           0 :                         // Check if the last op on this key set a value, (eg SETs, MERGEs).
    1044           0 :                         // If the key should be visible to the iterator and it would make a
    1045           0 :                         // good candidate for a SeekPrefixGE.
    1046           0 :                         if visibleHistory.hasVisibleValue() {
    1047           0 :                                 possibleKeys = append(possibleKeys, keyMeta.key)
    1048           0 :                         }
    1049             :                 }
    1050             : 
    1051           0 :                 if len(possibleKeys) > 0 {
    1052           0 :                         key = []byte(possibleKeys[g.rng.Int31n(int32(len(possibleKeys)))])
    1053           0 :                 }
    1054             :         }
    1055             : 
    1056           0 :         if key == nil {
    1057           0 :                 // TODO(bananabrick): We should try and use keys within the bounds,
    1058           0 :                 // even if we couldn't find any keys visible to the iterator. However,
    1059           0 :                 // doing this in experiments didn't really increase the valid
    1060           0 :                 // SeekPrefixGE calls by much.
    1061           0 :                 key = g.randKeyToRead(0) // 0% new keys
    1062           0 :         }
    1063             : 
    1064           0 :         g.add(&iterSeekPrefixGEOp{
    1065           0 :                 iterID:          iterID,
    1066           0 :                 key:             key,
    1067           0 :                 derivedReaderID: g.iterReaderID[iterID],
    1068           0 :         })
    1069             : }
    1070             : 
    1071           0 : func (g *generator) iterSeekLT(iterID objID) {
    1072           0 :         g.add(&iterSeekLTOp{
    1073           0 :                 iterID:          iterID,
    1074           0 :                 key:             g.randKeyToRead(0.001), // 0.1% new keys
    1075           0 :                 derivedReaderID: g.iterReaderID[iterID],
    1076           0 :         })
    1077           0 : }
    1078             : 
    1079           0 : func (g *generator) iterSeekLTWithLimit(iterID objID) {
    1080           0 :         // 0.1% new keys
    1081           0 :         key, limit := g.randKeyToRead(0.001), g.randKeyToRead(0.001)
    1082           0 :         if g.cmp(limit, key) > 0 {
    1083           0 :                 key, limit = limit, key
    1084           0 :         }
    1085           0 :         g.add(&iterSeekLTOp{
    1086           0 :                 iterID:          iterID,
    1087           0 :                 key:             key,
    1088           0 :                 limit:           limit,
    1089           0 :                 derivedReaderID: g.iterReaderID[iterID],
    1090           0 :         })
    1091             : }
    1092             : 
    1093             : // randIter performs partial func application ("currying"), returning a new
    1094             : // function that supplies the given func with a random iterator.
    1095           0 : func (g *generator) randIter(gen func(objID)) func() {
    1096           0 :         return func() {
    1097           0 :                 if len(g.liveIters) == 0 {
    1098           0 :                         return
    1099           0 :                 }
    1100           0 :                 gen(g.liveIters.rand(g.rng))
    1101             :         }
    1102             : }
    1103             : 
    1104           0 : func (g *generator) iterFirst(iterID objID) {
    1105           0 :         g.add(&iterFirstOp{
    1106           0 :                 iterID:          iterID,
    1107           0 :                 derivedReaderID: g.iterReaderID[iterID],
    1108           0 :         })
    1109           0 : }
    1110             : 
    1111           0 : func (g *generator) iterLast(iterID objID) {
    1112           0 :         g.add(&iterLastOp{
    1113           0 :                 iterID:          iterID,
    1114           0 :                 derivedReaderID: g.iterReaderID[iterID],
    1115           0 :         })
    1116           0 : }
    1117             : 
    1118           0 : func (g *generator) iterNext(iterID objID) {
    1119           0 :         g.add(&iterNextOp{
    1120           0 :                 iterID:          iterID,
    1121           0 :                 derivedReaderID: g.iterReaderID[iterID],
    1122           0 :         })
    1123           0 : }
    1124             : 
    1125           0 : func (g *generator) iterPrev(iterID objID) {
    1126           0 :         g.add(&iterPrevOp{
    1127           0 :                 iterID:          iterID,
    1128           0 :                 derivedReaderID: g.iterReaderID[iterID],
    1129           0 :         })
    1130           0 : }
    1131             : 
    1132           0 : func (g *generator) iterNextWithLimit(iterID objID) {
    1133           0 :         g.add(&iterNextOp{
    1134           0 :                 iterID:          iterID,
    1135           0 :                 limit:           g.randKeyToRead(0.001), // 0.1% new keys
    1136           0 :                 derivedReaderID: g.iterReaderID[iterID],
    1137           0 :         })
    1138           0 : }
    1139             : 
    1140           0 : func (g *generator) iterNextPrefix(iterID objID) {
    1141           0 :         g.add(&iterNextPrefixOp{
    1142           0 :                 iterID:          iterID,
    1143           0 :                 derivedReaderID: g.iterReaderID[iterID],
    1144           0 :         })
    1145           0 : }
    1146             : 
    1147           0 : func (g *generator) iterCanSingleDelete(iterID objID) {
    1148           0 :         g.add(&iterCanSingleDelOp{
    1149           0 :                 iterID:          iterID,
    1150           0 :                 derivedReaderID: g.iterReaderID[iterID],
    1151           0 :         })
    1152           0 : }
    1153             : 
    1154           0 : func (g *generator) iterPrevWithLimit(iterID objID) {
    1155           0 :         g.add(&iterPrevOp{
    1156           0 :                 iterID:          iterID,
    1157           0 :                 limit:           g.randKeyToRead(0.001), // 0.1% new keys
    1158           0 :                 derivedReaderID: g.iterReaderID[iterID],
    1159           0 :         })
    1160           0 : }
    1161             : 
    1162           0 : func (g *generator) readerGet() {
    1163           0 :         if len(g.liveReaders) == 0 {
    1164           0 :                 return
    1165           0 :         }
    1166             : 
    1167           0 :         readerID := g.liveReaders.rand(g.rng)
    1168           0 : 
    1169           0 :         // If the chosen reader is a snapshot created with user-specified key
    1170           0 :         // ranges, restrict the read to fall within one of the provided key ranges.
    1171           0 :         var key []byte
    1172           0 :         if bounds := g.snapshotBounds[readerID]; len(bounds) > 0 {
    1173           0 :                 kr := bounds[g.rng.Intn(len(bounds))]
    1174           0 :                 key = g.randKeyToReadInRange(0.001, kr) // 0.1% new keys
    1175           0 :         } else {
    1176           0 :                 key = g.randKeyToRead(0.001) // 0.1% new keys
    1177           0 :         }
    1178           0 :         derivedDBID := objID(0)
    1179           0 :         if readerID.tag() == batchTag || readerID.tag() == snapTag {
    1180           0 :                 derivedDBID = g.deriveDB(readerID)
    1181           0 :         }
    1182           0 :         g.add(&getOp{readerID: readerID, key: key, derivedDBID: derivedDBID})
    1183             : }
    1184             : 
    1185           0 : func (g *generator) replicate() {
    1186           0 :         if len(g.dbs) < 2 {
    1187           0 :                 return
    1188           0 :         }
    1189             : 
    1190           0 :         source := g.dbs.rand(g.rng)
    1191           0 :         dest := source
    1192           0 :         for dest == source {
    1193           0 :                 dest = g.dbs.rand(g.rng)
    1194           0 :         }
    1195             : 
    1196           0 :         startKey, endKey := g.prefixKeyRange()
    1197           0 :         g.add(&replicateOp{
    1198           0 :                 source: source,
    1199           0 :                 dest:   dest,
    1200           0 :                 start:  startKey,
    1201           0 :                 end:    endKey,
    1202           0 :         })
    1203             : }
    1204             : 
    1205             : // generateDisjointKeyRanges generates n disjoint key ranges.
    1206           0 : func (g *generator) generateDisjointKeyRanges(n int) []pebble.KeyRange {
    1207           0 :         bounds := make([][]byte, 2*n)
    1208           0 :         used := map[string]bool{}
    1209           0 :         for i := 0; i < len(bounds); i++ {
    1210           0 :                 k := g.prefix(g.randKeyToRead(0.1))
    1211           0 :                 for used[string(k)] {
    1212           0 :                         k = g.prefix(g.randKeyToRead(0.1))
    1213           0 :                 }
    1214           0 :                 bounds[i] = k
    1215           0 :                 used[string(k)] = true
    1216             :         }
    1217           0 :         slices.SortFunc(bounds, g.cmp)
    1218           0 :         keyRanges := make([]pebble.KeyRange, n)
    1219           0 :         for i := range keyRanges {
    1220           0 :                 keyRanges[i] = pebble.KeyRange{
    1221           0 :                         Start: bounds[i*2],
    1222           0 :                         End:   bounds[i*2+1],
    1223           0 :                 }
    1224           0 :         }
    1225           0 :         return keyRanges
    1226             : }
    1227             : 
    1228           0 : func (g *generator) newSnapshot() {
    1229           0 :         snapID := makeObjID(snapTag, g.init.snapshotSlots)
    1230           0 :         g.init.snapshotSlots++
    1231           0 :         g.liveSnapshots = append(g.liveSnapshots, snapID)
    1232           0 :         g.liveReaders = append(g.liveReaders, snapID)
    1233           0 :         dbID := g.dbs.rand(g.rng)
    1234           0 :         g.objDB[snapID] = dbID
    1235           0 : 
    1236           0 :         iters := make(objIDSet)
    1237           0 :         g.snapshots[snapID] = iters
    1238           0 :         g.readers[snapID] = iters
    1239           0 : 
    1240           0 :         s := &newSnapshotOp{
    1241           0 :                 dbID:   dbID,
    1242           0 :                 snapID: snapID,
    1243           0 :         }
    1244           0 : 
    1245           0 :         // Impose bounds on the keys that may be read with the snapshot. Setting bounds
    1246           0 :         // allows some runs of the metamorphic test to use a EventuallyFileOnlySnapshot
    1247           0 :         // instead of a Snapshot, testing equivalence between the two for reads within
    1248           0 :         // those bounds.
    1249           0 :         s.bounds = g.generateDisjointKeyRanges(
    1250           0 :                 g.rng.Intn(5) + 1, /* between 1-5 */
    1251           0 :         )
    1252           0 :         g.snapshotBounds[snapID] = s.bounds
    1253           0 :         g.add(s)
    1254           0 : }
    1255             : 
    1256           0 : func (g *generator) snapshotClose() {
    1257           0 :         if len(g.liveSnapshots) == 0 {
    1258           0 :                 return
    1259           0 :         }
    1260             : 
    1261           0 :         snapID := g.liveSnapshots.rand(g.rng)
    1262           0 :         g.liveSnapshots.remove(snapID)
    1263           0 :         iters := g.snapshots[snapID]
    1264           0 :         delete(g.snapshots, snapID)
    1265           0 :         g.liveReaders.remove(snapID)
    1266           0 :         delete(g.readers, snapID)
    1267           0 : 
    1268           0 :         for _, id := range iters.sorted() {
    1269           0 :                 g.liveIters.remove(id)
    1270           0 :                 delete(g.iters, id)
    1271           0 :                 g.add(&closeOp{objID: id})
    1272           0 :         }
    1273             : 
    1274           0 :         g.add(&closeOp{objID: snapID})
    1275             : }
    1276             : 
    1277           0 : func (g *generator) writerApply() {
    1278           0 :         if len(g.liveBatches) == 0 {
    1279           0 :                 return
    1280           0 :         }
    1281           0 :         if len(g.liveWriters) < 2 {
    1282           0 :                 panic(fmt.Sprintf("insufficient liveWriters (%d) to apply batch", len(g.liveWriters)))
    1283             :         }
    1284             : 
    1285           0 :         batchID := g.liveBatches.rand(g.rng)
    1286           0 :         dbID := g.objDB[batchID]
    1287           0 : 
    1288           0 :         var writerID objID
    1289           0 :         for {
    1290           0 :                 // NB: The writer we're applying to, as well as the batch we're applying,
    1291           0 :                 // must be from the same DB. The writer could be the db itself. Applying
    1292           0 :                 // a batch from one DB on another DB results in a panic, so avoid that.
    1293           0 :                 writerID = g.liveWriters.rand(g.rng)
    1294           0 :                 writerDBID := writerID
    1295           0 :                 if writerID.tag() != dbTag {
    1296           0 :                         writerDBID = g.objDB[writerID]
    1297           0 :                 }
    1298           0 :                 if writerID != batchID && writerDBID == dbID {
    1299           0 :                         break
    1300             :                 }
    1301             :         }
    1302             : 
    1303             :         // The batch we're applying may contain single delete tombstones that when
    1304             :         // applied to the writer result in nondeterminism in the deleted key. If
    1305             :         // that's the case, we can restore determinism by first deleting the key
    1306             :         // from the writer.
    1307             :         //
    1308             :         // Generating additional operations here is not ideal, but it simplifies
    1309             :         // single delete invariants significantly.
    1310           0 :         singleDeleteConflicts := g.keyManager.checkForSingleDelConflicts(batchID, writerID, false /* collapsed */)
    1311           0 :         for _, conflict := range singleDeleteConflicts {
    1312           0 :                 g.add(&deleteOp{
    1313           0 :                         writerID:    writerID,
    1314           0 :                         key:         conflict,
    1315           0 :                         derivedDBID: dbID,
    1316           0 :                 })
    1317           0 :         }
    1318             : 
    1319           0 :         g.removeBatchFromGenerator(batchID)
    1320           0 : 
    1321           0 :         g.add(&applyOp{
    1322           0 :                 writerID: writerID,
    1323           0 :                 batchID:  batchID,
    1324           0 :         })
    1325           0 :         g.add(&closeOp{
    1326           0 :                 objID: batchID,
    1327           0 :         })
    1328             : }
    1329             : 
    1330           0 : func (g *generator) writerDelete() {
    1331           0 :         if len(g.liveWriters) == 0 {
    1332           0 :                 return
    1333           0 :         }
    1334             : 
    1335           0 :         writerID := g.liveWriters.rand(g.rng)
    1336           0 :         derivedDBID := writerID
    1337           0 :         if derivedDBID.tag() != dbTag {
    1338           0 :                 derivedDBID = g.objDB[writerID]
    1339           0 :         }
    1340           0 :         g.add(&deleteOp{
    1341           0 :                 writerID:    writerID,
    1342           0 :                 key:         g.randKeyToWrite(0.001), // 0.1% new keys
    1343           0 :                 derivedDBID: derivedDBID,
    1344           0 :         })
    1345             : }
    1346             : 
    1347           0 : func (g *generator) writerDeleteRange() {
    1348           0 :         if len(g.liveWriters) == 0 {
    1349           0 :                 return
    1350           0 :         }
    1351             : 
    1352           0 :         start := g.randKeyToWrite(0.001)
    1353           0 :         end := g.randKeyToWrite(0.001)
    1354           0 :         if g.cmp(start, end) > 0 {
    1355           0 :                 start, end = end, start
    1356           0 :         }
    1357             : 
    1358           0 :         writerID := g.liveWriters.rand(g.rng)
    1359           0 :         g.add(&deleteRangeOp{
    1360           0 :                 writerID: writerID,
    1361           0 :                 start:    start,
    1362           0 :                 end:      end,
    1363           0 :         })
    1364             : }
    1365             : 
    1366           0 : func (g *generator) writerRangeKeyDelete() {
    1367           0 :         if len(g.liveWriters) == 0 {
    1368           0 :                 return
    1369           0 :         }
    1370           0 :         start, end := g.prefixKeyRange()
    1371           0 : 
    1372           0 :         writerID := g.liveWriters.rand(g.rng)
    1373           0 :         g.add(&rangeKeyDeleteOp{
    1374           0 :                 writerID: writerID,
    1375           0 :                 start:    start,
    1376           0 :                 end:      end,
    1377           0 :         })
    1378             : }
    1379             : 
    1380           0 : func (g *generator) writerRangeKeySet() {
    1381           0 :         if len(g.liveWriters) == 0 {
    1382           0 :                 return
    1383           0 :         }
    1384           0 :         start, end := g.prefixKeyRange()
    1385           0 : 
    1386           0 :         // 90% of the time, set a suffix.
    1387           0 :         var suffix []byte
    1388           0 :         if g.rng.Float64() < 0.90 {
    1389           0 :                 // Increase the max suffix 5% of the time.
    1390           0 :                 suffix = g.randSuffixToWrite(0.05)
    1391           0 :         }
    1392             : 
    1393           0 :         writerID := g.liveWriters.rand(g.rng)
    1394           0 :         g.add(&rangeKeySetOp{
    1395           0 :                 writerID: writerID,
    1396           0 :                 start:    start,
    1397           0 :                 end:      end,
    1398           0 :                 suffix:   suffix,
    1399           0 :                 value:    g.randValue(0, maxValueSize),
    1400           0 :         })
    1401             : }
    1402             : 
    1403           0 : func (g *generator) writerRangeKeyUnset() {
    1404           0 :         if len(g.liveWriters) == 0 {
    1405           0 :                 return
    1406           0 :         }
    1407           0 :         start, end := g.prefixKeyRange()
    1408           0 : 
    1409           0 :         // 90% of the time, set a suffix.
    1410           0 :         var suffix []byte
    1411           0 :         if g.rng.Float64() < 0.90 {
    1412           0 :                 // Increase the max suffix 5% of the time.
    1413           0 :                 suffix = g.randSuffixToWrite(0.05)
    1414           0 :         }
    1415             : 
    1416             :         // TODO(jackson): Increase probability of effective unsets? Purely random
    1417             :         // unsets are unlikely to remove an active range key.
    1418             : 
    1419           0 :         writerID := g.liveWriters.rand(g.rng)
    1420           0 :         g.add(&rangeKeyUnsetOp{
    1421           0 :                 writerID: writerID,
    1422           0 :                 start:    start,
    1423           0 :                 end:      end,
    1424           0 :                 suffix:   suffix,
    1425           0 :         })
    1426             : }
    1427             : 
    1428           0 : func (g *generator) writerIngest() {
    1429           0 :         if len(g.liveBatches) == 0 {
    1430           0 :                 return
    1431           0 :         }
    1432             : 
    1433             :         // Ingest between 1 and 3 batches.
    1434           0 :         dbID := g.dbs.rand(g.rng)
    1435           0 :         n := min(1+g.rng.Intn(3), len(g.liveBatches))
    1436           0 :         batchIDs := make([]objID, n)
    1437           0 :         derivedDBIDs := make([]objID, n)
    1438           0 :         for i := 0; i < n; i++ {
    1439           0 :                 batchID := g.liveBatches.rand(g.rng)
    1440           0 :                 batchIDs[i] = batchID
    1441           0 :                 derivedDBIDs[i] = g.objDB[batchIDs[i]]
    1442           0 :                 g.removeBatchFromGenerator(batchID)
    1443           0 :         }
    1444             : 
    1445             :         // Ingestions may fail if the ingested sstables overlap one another.
    1446             :         // Either it succeeds and its keys are committed to the DB, or it fails and
    1447             :         // the keys are not committed.
    1448           0 :         if !g.keyManager.doObjectBoundsOverlap(batchIDs) {
    1449           0 :                 // This ingestion will succeed.
    1450           0 :                 //
    1451           0 :                 // The batches we're ingesting may contain single delete tombstones that
    1452           0 :                 // when applied to the writer result in nondeterminism in the deleted key.
    1453           0 :                 // If that's the case, we can restore determinism by first deleting the keys
    1454           0 :                 // from the writer.
    1455           0 :                 //
    1456           0 :                 // Generating additional operations here is not ideal, but it simplifies
    1457           0 :                 // single delete invariants significantly.
    1458           0 :                 for _, batchID := range batchIDs {
    1459           0 :                         singleDeleteConflicts := g.keyManager.checkForSingleDelConflicts(batchID, dbID, true /* collapsed */)
    1460           0 :                         for _, conflict := range singleDeleteConflicts {
    1461           0 :                                 g.add(&deleteOp{
    1462           0 :                                         writerID:    dbID,
    1463           0 :                                         key:         conflict,
    1464           0 :                                         derivedDBID: dbID,
    1465           0 :                                 })
    1466           0 :                         }
    1467             :                 }
    1468             :         }
    1469           0 :         g.add(&ingestOp{
    1470           0 :                 dbID:         dbID,
    1471           0 :                 batchIDs:     batchIDs,
    1472           0 :                 derivedDBIDs: derivedDBIDs,
    1473           0 :         })
    1474             : }
    1475             : 
    1476           0 : func (g *generator) writerIngestAndExcise() {
    1477           0 :         if len(g.liveBatches) == 0 {
    1478           0 :                 return
    1479           0 :         }
    1480             : 
    1481           0 :         dbID := g.dbs.rand(g.rng)
    1482           0 :         batchID := g.liveBatches.rand(g.rng)
    1483           0 :         g.removeBatchFromGenerator(batchID)
    1484           0 : 
    1485           0 :         start, end := g.prefixKeyRange()
    1486           0 :         derivedDBID := g.objDB[batchID]
    1487           0 : 
    1488           0 :         g.add(&ingestAndExciseOp{
    1489           0 :                 dbID:        dbID,
    1490           0 :                 batchID:     batchID,
    1491           0 :                 derivedDBID: derivedDBID,
    1492           0 :                 exciseStart: start,
    1493           0 :                 exciseEnd:   end,
    1494           0 :         })
    1495             : }
    1496             : 
    1497           0 : func (g *generator) writerMerge() {
    1498           0 :         if len(g.liveWriters) == 0 {
    1499           0 :                 return
    1500           0 :         }
    1501             : 
    1502           0 :         writerID := g.liveWriters.rand(g.rng)
    1503           0 :         g.add(&mergeOp{
    1504           0 :                 writerID: writerID,
    1505           0 :                 // 20% new keys.
    1506           0 :                 key:   g.randKeyToWrite(0.2),
    1507           0 :                 value: g.randValue(0, maxValueSize),
    1508           0 :         })
    1509             : }
    1510             : 
    1511           0 : func (g *generator) writerSet() {
    1512           0 :         if len(g.liveWriters) == 0 {
    1513           0 :                 return
    1514           0 :         }
    1515             : 
    1516           0 :         writerID := g.liveWriters.rand(g.rng)
    1517           0 :         g.add(&setOp{
    1518           0 :                 writerID: writerID,
    1519           0 :                 // 50% new keys.
    1520           0 :                 key:   g.randKeyToWrite(0.5),
    1521           0 :                 value: g.randValue(0, maxValueSize),
    1522           0 :         })
    1523             : }
    1524             : 
    1525           0 : func (g *generator) writerSingleDelete() {
    1526           0 :         if len(g.liveWriters) == 0 {
    1527           0 :                 return
    1528           0 :         }
    1529             : 
    1530           0 :         writerID := g.liveWriters.rand(g.rng)
    1531           0 :         key := g.randKeyToSingleDelete(writerID)
    1532           0 :         if key == nil {
    1533           0 :                 return
    1534           0 :         }
    1535           0 :         g.add(&singleDeleteOp{
    1536           0 :                 writerID: writerID,
    1537           0 :                 key:      key,
    1538           0 :                 // Keys eligible for single deletes can be removed with a regular
    1539           0 :                 // delete. Mutate a percentage of SINGLEDEL ops into DELETEs. Note that
    1540           0 :                 // here we are only determining whether the replacement *could* happen.
    1541           0 :                 // At test runtime, the `replaceSingleDelete` test option must also be
    1542           0 :                 // set to true for the single delete to be replaced.
    1543           0 :                 maybeReplaceDelete: g.rng.Float64() < 0.25,
    1544           0 :         })
    1545             : }
    1546             : 
// maybeMutateOptions randomly mutates opts in place: it may adjust or clear
// the iterator bounds, change the key types and range-key mask suffix,
// set or clear the block-property suffix filter range, and toggle use of
// L6 filters. With 5% probability it leaves opts entirely untouched so
// that SetOptions exercises its no-change fast path. All randomness comes
// from g.rng, so generation stays deterministic for a fixed seed.
func (g *generator) maybeMutateOptions(readerID objID, opts *iterOpts) {
	// With 95% probability, allow changes to any options at all. This ensures
	// that in 5% of cases there are no changes, and SetOptions hits its fast
	// path.
	if g.rng.Intn(100) >= 5 {
		// maybeSetSnapshotIterBounds may constrain the bounds itself (e.g.
		// for bounded snapshot readers); only mutate bounds freely when it
		// did not.
		if !g.maybeSetSnapshotIterBounds(readerID, opts) {
			// With 1/3 probability, clear existing bounds.
			if opts.lower != nil && g.rng.Intn(3) == 0 {
				opts.lower = nil
			}
			if opts.upper != nil && g.rng.Intn(3) == 0 {
				opts.upper = nil
			}
			// With 1/3 probability, update the bounds.
			if g.rng.Intn(3) == 0 {
				// Generate a new key with a .1% probability.
				opts.lower = g.randKeyToRead(0.001)
			}
			if g.rng.Intn(3) == 0 {
				// Generate a new key with a .1% probability.
				opts.upper = g.randKeyToRead(0.001)
			}
			// Maintain the invariant lower <= upper by swapping if the
			// freshly chosen bounds ended up inverted.
			if g.cmp(opts.lower, opts.upper) > 0 {
				opts.lower, opts.upper = opts.upper, opts.lower
			}
		}

		// With 1/3 probability, update the key-types/mask.
		if g.rng.Intn(3) == 0 {
			opts.keyTypes, opts.maskSuffix = g.randKeyTypesAndMask()
		}

		// With 1/3 probability, clear existing filter.
		if opts.filterMax > 0 && g.rng.Intn(3) == 0 {
			opts.filterMax, opts.filterMin = 0, 0
		}
		// With 10% probability, set a filter range. The suffix filter keeps
		// only keys whose suffixes fall in [filterMin, filterMax), so the
		// two draws are ordered and forced to be distinct below.
		if g.rng.Intn(10) == 1 {
			max := g.cfg.writeSuffixDist.Max()
			opts.filterMin, opts.filterMax = g.rng.Uint64n(max)+1, g.rng.Uint64n(max)+1
			if opts.filterMin > opts.filterMax {
				opts.filterMin, opts.filterMax = opts.filterMax, opts.filterMin
			} else if opts.filterMin == opts.filterMax {
				opts.filterMax = opts.filterMin + 1
			}
		}
		// With 10% probability, flip enablement of L6 filters.
		if g.rng.Float64() <= 0.1 {
			opts.useL6Filters = !opts.useL6Filters
		}
	}
}
    1599             : 
    1600           0 : func (g *generator) pickOneUniform(options ...func(objID)) func(objID) {
    1601           0 :         i := g.rng.Intn(len(options))
    1602           0 :         return options[i]
    1603           0 : }
    1604             : 
    1605           0 : func (g *generator) cmp(a, b []byte) int {
    1606           0 :         return g.keyManager.comparer.Compare(a, b)
    1607           0 : }
    1608             : 
    1609           0 : func (g *generator) equal(a, b []byte) bool {
    1610           0 :         return g.keyManager.comparer.Equal(a, b)
    1611           0 : }
    1612             : 
    1613           0 : func (g *generator) split(a []byte) int {
    1614           0 :         return g.keyManager.comparer.Split(a)
    1615           0 : }
    1616             : 
    1617           0 : func (g *generator) prefix(a []byte) []byte {
    1618           0 :         return a[:g.split(a)]
    1619           0 : }
    1620             : 
    1621           0 : func (g *generator) String() string {
    1622           0 :         var buf bytes.Buffer
    1623           0 :         for _, op := range g.ops {
    1624           0 :                 fmt.Fprintf(&buf, "%s\n", op)
    1625           0 :         }
    1626           0 :         return buf.String()
    1627             : }

Generated by: LCOV version 1.14