LCOV - code coverage report
Current view: top level - pebble/metamorphic - key_manager.go (source / functions) Hit Total Coverage
Test: 2024-06-27 08:15Z 18b77232 - tests + meta.lcov Lines: 545 581 93.8 %
Date: 2024-06-27 08:16:54 Functions: 0 0 -

          Line data    Source code
       1             : // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
       2             : // of this source code is governed by a BSD-style license that can be found in
       3             : // the LICENSE file.
       4             : 
       5             : package metamorphic
       6             : 
       7             : import (
       8             :         "cmp"
       9             :         "fmt"
      10             :         "slices"
      11             :         "strings"
      12             : 
      13             :         "github.com/cockroachdb/errors"
      14             :         "github.com/cockroachdb/pebble"
      15             :         "github.com/cockroachdb/pebble/internal/base"
      16             :         "github.com/cockroachdb/pebble/internal/testkeys"
      17             :         "github.com/stretchr/testify/require"
      18             : )
      19             : 
// keyMeta is metadata associated with an (objID, key) pair, where objID is
// a writer containing the key.
type keyMeta struct {
	// objID identifies the writer object this metadata belongs to.
	objID objID
	// key is the user key this metadata describes.
	key []byte
	// history provides the history of writer operations applied against this
	// key on this object. history is always ordered by non-decreasing
	// metaTimestamp.
	history keyHistory
}
      30             : 
// clear discards the key's accumulated history. The history slice is
// truncated rather than nil'd so its backing storage can be reused by
// subsequent appends without reallocating.
func (m *keyMeta) clear() {
	m.history = m.history[:0]
}
      34             : 
// mergeInto merges this metadata into dst, appending all of m's individual
// operations to dst's history at the provided timestamp.
func (m *keyMeta) mergeInto(dst *keyMeta, ts int) {
	for _, op := range m.history {
		// If the key is being merged into a database object and the operation
		// is a delete, we can clear the destination history. Database objects
		// are end points in the merging of keys and won't be the source of a
		// future merge. Deletions cause all other operations to behave as
		// though the key was never written to the database at all, so we don't
		// need to consider it for maintaining single delete invariants.
		//
		// NB: There's a subtlety here in that isDelete() will return true if
		// opType is a writerSingleDelete, but single deletes are capable of
		// leaking information about the history of writes. However, that's
		// okay, because as long as we're properly generating single deletes
		// according to the W1 invariant described in keyManager's comment, a
		// single delete is equivalent to delete for the current history.
		if dst.objID.tag() == dbTag && op.opType.isDelete() {
			dst.clear()
			continue
		}
		dst.history = append(dst.history, keyHistoryItem{
			opType:        op.opType,
			metaTimestamp: ts,
		})
	}
}
      62             : 
// bounds holds a user key interval: [smallest, largest] when largestExcl is
// false, or [smallest, largest) when it is true. The zero value (both
// endpoints nil) represents unset bounds; see IsUnset.
type bounds struct {
	smallest    []byte
	largest     []byte
	largestExcl bool // is largest exclusive?
}
      68             : 
      69           1 : func (b bounds) checkValid(cmp base.Compare) {
      70           1 :         if c := cmp(b.smallest, b.largest); c > 0 {
      71           0 :                 panic(fmt.Sprintf("invalid bound [%q, %q]", b.smallest, b.largest))
      72           1 :         } else if c == 0 && b.largestExcl {
      73           0 :                 panic(fmt.Sprintf("invalid bound [%q, %q)", b.smallest, b.largest))
      74             :         }
      75             : }
      76             : 
      77           1 : func (b bounds) String() string {
      78           1 :         if b.largestExcl {
      79           1 :                 return fmt.Sprintf("[%q,%q)", b.smallest, b.largest)
      80           1 :         }
      81           1 :         return fmt.Sprintf("[%q,%q]", b.smallest, b.largest)
      82             : }
      83             : 
      84             : // Overlaps returns true iff the bounds intersect.
      85           1 : func (b *bounds) Overlaps(cmp base.Compare, other bounds) bool {
      86           1 :         if b.IsUnset() || other.IsUnset() {
      87           1 :                 return false
      88           1 :         }
      89             :         // Is b strictly before other?
      90           1 :         if v := cmp(b.largest, other.smallest); v < 0 || (v == 0 && b.largestExcl) {
      91           0 :                 return false
      92           0 :         }
      93             :         // Is b strictly after other?
      94           1 :         if v := cmp(b.smallest, other.largest); v > 0 || (v == 0 && other.largestExcl) {
      95           1 :                 return false
      96           1 :         }
      97           1 :         return true
      98             : }
      99             : 
     100             : // IsUnset returns true if the bounds haven't been set.
     101           1 : func (b bounds) IsUnset() bool {
     102           1 :         return b.smallest == nil && b.largest == nil
     103           1 : }
     104             : 
// Expand potentially expands the receiver bounds to include the other given
// bounds. If the receiver is unset, the other bounds are copied.
func (b *bounds) Expand(cmp base.Compare, other bounds) {
	if other.IsUnset() {
		return
	}
	other.checkValid(cmp)
	if b.IsUnset() {
		*b = other
		return
	}
	// Lower the smallest bound if other's smallest sorts before it.
	if cmp(b.smallest, other.smallest) > 0 {
		b.smallest = other.smallest
	}
	// Raise the largest bound if other's is wider. Equal keys count as wider
	// when b's bound is exclusive: an inclusive (or equally exclusive) bound
	// at the same key covers at least as much, so other's exclusivity is
	// adopted in that case too.
	if v := cmp(b.largest, other.largest); v < 0 || (v == 0 && b.largestExcl) {
		b.largest = other.largest
		b.largestExcl = other.largestExcl
	}
}
     124             : 
// keyManager tracks the write operations performed on keys in the generation
// phase of the metamorphic test. It maintains histories of operations performed
// against every unique user key on every writer object. These histories inform
// operation generation in order to maintain invariants that Pebble requires of
// end users, mostly around single deletions.
//
// A single deletion has a subtle requirement of the writer:
//
//	W1: The writer may only single delete a key `k` if `k` has been Set once
//	    (and never MergeD) since the last delete.
//
// When a SINGLEDEL key deletes a SET key within a compaction, both the SET and
// the SINGLEDEL keys are elided. If multiple SETs of the key exist within the
// LSM, the SINGLEDEL reveals the lower SET. This behavior is dependent on the
// internal LSM state and nondeterministic. To ensure determinism, the end user
// must satisfy W1 and use single delete only when they can guarantee that the
// key has been set at most once since the last delete, preventing this rollback
// to a previous value.
//
// This W1 invariant requires a delicate dance during operation generation,
// because independent batches may be independently built and committed. With
// multi-instance variants of the metamorphic tests, keys in batches may
// ultimately be committed to any of several DB instances. To satisfy these
// requirements, the key manager tracks the history of every key on every
// writable object. When generating a new single deletion operation, the
// generator asks the key manager for a set of keys for which a single delete
// maintains the W1 invariant within the object itself. This object-local W1
// invariant (OLW1) is equivalent to W1 if one only ever performs write
// operations directly against individual DB objects.
//
// However with the existence of batches that receive writes independent of DB
// objects, W1 may be violated by appending the histories of two objects that
// independently satisfy OLW1. Consider a sequence such as:
//
//  1. db1.Set("foo")
//  2. batch1.Set("foo")
//  3. batch1.SingleDelete("foo")
//  4. db1.Apply(batch1)
//
// Both db1 and batch1 satisfy the object-local invariant OLW1. However the
// composition of the histories created by appending batch1's operations to db1
// creates a history that now violates W1 on db1. To detect this violation,
// batch applications/commits and ingestions examine the tail of the destination
// object's history and the head of the source batch's history. When a violation
// is detected, these operations insert additional Delete operations to clear
// the conflicting keys before proceeding with the conflicting operation. These
// deletes reset the key history.
//
// Note that this generation-time key tracking requires that operations be
// infallible, because a runtime failure would cause the key manager's state to
// diverge from the runtime object state. Ingestion operations pose an obstacle,
// because the generator may generate ingestions that fail due to overlapping
// sstables. Today, this complication is sidestepped by avoiding ingestion of
// multiple batches containing deletes or single deletes since loss of those
// specific operations on a key are what we cannot tolerate (doing SingleDelete
// on a key that has not been written to because the Set was lost is harmless).
//
// TODO(jackson): Instead, compute smallest and largest bounds of batches so
// that we know at generation-time whether or not an ingestion operation will
// fail and can avoid updating key state.
type keyManager struct {
	comparer *base.Comparer

	// metaTimestamp is used to provide a ordering over certain operations like
	// iter creation, updates to keys. Keeping track of the timestamp allows us
	// to make determinations such as whether a key will be visible to an
	// iterator.
	metaTimestamp int

	// byObj maps each writer object to its per-object key metadata: the
	// history of every key written to that object, the object's key bounds,
	// and its range del / range key flags.
	byObj map[objID]*objKeyMeta
	// globalKeys represents all the keys that have been generated so far. Not
	// all these keys have been written to. globalKeys is sorted.
	globalKeys [][]byte
	// globalKeysMap contains the same keys as globalKeys but in a map. It
	// ensures no duplication.
	globalKeysMap map[string]bool
	// globalKeyPrefixes contains all the key prefixes (as defined by the
	// comparer's Split) generated so far. globalKeyPrefixes is sorted.
	globalKeyPrefixes [][]byte
	// globalKeyPrefixesMap contains the same keys as globalKeyPrefixes. It
	// ensures no duplication.
	globalKeyPrefixesMap map[string]struct{}
}
     208             : 
// objKeyMeta aggregates the key manager's state for a single writer object:
// the per-key write histories, the object's key bounds, and whether the
// object has received range del / range key operations.
type objKeyMeta struct {
	// id identifies the object this metadata belongs to.
	id objID
	// List of keys, and what has happened to each in this object.
	// Will be transferred when needed.
	keys map[string]*keyMeta
	// bounds holds user key bounds encompassing all the keys set within an
	// object. It's updated within `update` when a new op is generated.
	bounds bounds
	// These flags are true if the object has had range del or range key operations.
	hasRangeDels bool
	hasRangeKeys bool
}
     221             : 
     222             : // MergeKey adds the given key at the given meta timestamp, merging the histories as needed.
     223           1 : func (okm *objKeyMeta) MergeKey(k *keyMeta, ts int) {
     224           1 :         meta, ok := okm.keys[string(k.key)]
     225           1 :         if !ok {
     226           1 :                 meta = &keyMeta{
     227           1 :                         objID: okm.id,
     228           1 :                         key:   k.key,
     229           1 :                 }
     230           1 :                 okm.keys[string(k.key)] = meta
     231           1 :         }
     232           1 :         k.mergeInto(meta, ts)
     233             : }
     234             : 
     235             : // CollapseKeys collapses the history of all keys. Used with ingestion operation
     236             : // which only use the last value of any given key.
     237           1 : func (okm *objKeyMeta) CollapseKeys() {
     238           1 :         for _, keyMeta := range okm.keys {
     239           1 :                 keyMeta.history = keyMeta.history.collapsed()
     240           1 :         }
     241             : }
     242             : 
     243             : // MergeFrom merges the `from` metadata into this one, appending all of its
     244             : // individual operations at the provided timestamp.
     245           1 : func (okm *objKeyMeta) MergeFrom(from *objKeyMeta, metaTimestamp int, cmp base.Compare) {
     246           1 :         // The result should be the same regardless of the ordering of the keys.
     247           1 :         for _, k := range from.keys {
     248           1 :                 okm.MergeKey(k, metaTimestamp)
     249           1 :         }
     250           1 :         okm.bounds.Expand(cmp, from.bounds)
     251           1 :         okm.hasRangeDels = okm.hasRangeDels || from.hasRangeDels
     252           1 :         okm.hasRangeKeys = okm.hasRangeKeys || from.hasRangeKeys
     253             : }
     254             : 
     255             : // objKeyMeta looks up the objKeyMeta for a given object, creating it if necessary.
     256           1 : func (k *keyManager) objKeyMeta(o objID) *objKeyMeta {
     257           1 :         m, ok := k.byObj[o]
     258           1 :         if !ok {
     259           1 :                 m = &objKeyMeta{
     260           1 :                         id:   o,
     261           1 :                         keys: make(map[string]*keyMeta),
     262           1 :                 }
     263           1 :                 k.byObj[o] = m
     264           1 :         }
     265           1 :         return m
     266             : }
     267             : 
     268             : // SortedKeysForObj returns all the entries in objKeyMeta(o).keys, in sorted
     269             : // order.
     270           1 : func (k *keyManager) SortedKeysForObj(o objID) []keyMeta {
     271           1 :         okm := k.objKeyMeta(o)
     272           1 :         res := make([]keyMeta, 0, len(okm.keys))
     273           1 :         for _, m := range okm.keys {
     274           1 :                 res = append(res, *m)
     275           1 :         }
     276           1 :         slices.SortFunc(res, func(a, b keyMeta) int {
     277           1 :                 cmp := k.comparer.Compare(a.key, b.key)
     278           1 :                 if cmp == 0 {
     279           0 :                         panic(fmt.Sprintf("distinct keys %q and %q compared as equal", a.key, b.key))
     280             :                 }
     281           1 :                 return cmp
     282             :         })
     283           1 :         return res
     284             : }
     285             : 
     286             : // InRangeKeysForObj returns all keys in the range [lower, upper) associated with the
     287             : // given object, in sorted order. If either of the bounds is nil, it is ignored.
     288           1 : func (k *keyManager) InRangeKeysForObj(o objID, lower, upper []byte) []keyMeta {
     289           1 :         var inRangeKeys []keyMeta
     290           1 :         for _, km := range k.SortedKeysForObj(o) {
     291           1 :                 if (lower == nil || k.comparer.Compare(km.key, lower) >= 0) &&
     292           1 :                         (upper == nil || k.comparer.Compare(km.key, upper) < 0) {
     293           1 :                         inRangeKeys = append(inRangeKeys, km)
     294           1 :                 }
     295             :         }
     296           1 :         return inRangeKeys
     297             : 
     298             : }
     299             : 
// KeysForExternalIngest returns the keys that will be ingested with an external
// object (taking into consideration the bounds, synthetic suffix, etc).
func (k *keyManager) KeysForExternalIngest(obj externalObjWithBounds) []keyMeta {
	var res []keyMeta
	for _, km := range k.SortedKeysForObj(obj.externalObjID) {
		// Apply prefix and suffix changes, then check the bounds.
		if obj.syntheticPrefix.IsSet() {
			km.key = obj.syntheticPrefix.Apply(km.key)
		}
		if obj.syntheticSuffix.IsSet() {
			n := k.comparer.Split(km.key)
			// Replace everything after the key's prefix with the synthetic
			// suffix. The three-index slice km.key[:n:n] caps capacity at n,
			// forcing append to allocate instead of overwriting bytes that
			// may be shared with another slice.
			km.key = append(km.key[:n:n], obj.syntheticSuffix...)
		}
		// Keep only keys falling within [obj.bounds.Start, obj.bounds.End).
		if k.comparer.Compare(km.key, obj.bounds.Start) >= 0 && k.comparer.Compare(km.key, obj.bounds.End) < 0 {
			res = append(res, km)
		}
	}
	// Check for duplicate resulting keys.
	// NOTE(review): this adjacent-pair check assumes res is still sorted
	// after the prefix/suffix rewrites above — confirm the transforms are
	// order-preserving.
	for i := 1; i < len(res); i++ {
		if k.comparer.Compare(res[i].key, res[i-1].key) == 0 {
			panic(fmt.Sprintf("duplicate external ingest key %q", res[i].key))
		}
	}
	return res
}
     325             : 
     326           1 : func (k *keyManager) nextMetaTimestamp() int {
     327           1 :         ret := k.metaTimestamp
     328           1 :         k.metaTimestamp++
     329           1 :         return ret
     330           1 : }
     331             : 
     332             : // newKeyManager returns a pointer to a new keyManager. Callers should
     333             : // interact with this using addNewKey, knownKeys, update methods only.
     334           1 : func newKeyManager(numInstances int) *keyManager {
     335           1 :         m := &keyManager{
     336           1 :                 comparer:             testkeys.Comparer,
     337           1 :                 byObj:                make(map[objID]*objKeyMeta),
     338           1 :                 globalKeysMap:        make(map[string]bool),
     339           1 :                 globalKeyPrefixesMap: make(map[string]struct{}),
     340           1 :         }
     341           1 :         for i := 1; i <= max(numInstances, 1); i++ {
     342           1 :                 m.objKeyMeta(makeObjID(dbTag, uint32(i)))
     343           1 :         }
     344           1 :         return m
     345             : }
     346             : 
     347             : // addNewKey adds the given key to the key manager for global key tracking.
     348             : // Returns false iff this is not a new key.
     349           1 : func (k *keyManager) addNewKey(key []byte) bool {
     350           1 :         if k.globalKeysMap[string(key)] {
     351           1 :                 return false
     352           1 :         }
     353           1 :         insertSorted(k.comparer.Compare, &k.globalKeys, key)
     354           1 :         k.globalKeysMap[string(key)] = true
     355           1 : 
     356           1 :         prefixLen := k.comparer.Split(key)
     357           1 :         if prefixLen == 0 {
     358           0 :                 panic(fmt.Sprintf("key %q has zero length prefix", key))
     359             :         }
     360           1 :         if _, ok := k.globalKeyPrefixesMap[string(key[:prefixLen])]; !ok {
     361           1 :                 insertSorted(k.comparer.Compare, &k.globalKeyPrefixes, key[:prefixLen])
     362           1 :                 k.globalKeyPrefixesMap[string(key[:prefixLen])] = struct{}{}
     363           1 :         }
     364           1 :         return true
     365             : }
     366             : 
     367             : // getOrInit returns the keyMeta for the (objID, key) pair, if it exists, else
     368             : // allocates, initializes and returns a new value.
     369           1 : func (k *keyManager) getOrInit(id objID, key []byte) *keyMeta {
     370           1 :         objKeys := k.objKeyMeta(id)
     371           1 :         m, ok := objKeys.keys[string(key)]
     372           1 :         if ok {
     373           1 :                 return m
     374           1 :         }
     375           1 :         m = &keyMeta{
     376           1 :                 objID: id,
     377           1 :                 key:   key,
     378           1 :         }
     379           1 :         // Initialize the key-to-meta index.
     380           1 :         objKeys.keys[string(key)] = m
     381           1 :         // Expand the object's bounds to contain this key if they don't already.
     382           1 :         objKeys.bounds.Expand(k.comparer.Compare, k.makeSingleKeyBounds(key))
     383           1 :         return m
     384             : }
     385             : 
     386             : // mergeObjectInto merges obj key metadata from an object into another and
     387             : // deletes the metadata for the source object (which must not be used again).
     388           1 : func (k *keyManager) mergeObjectInto(from, to objID) {
     389           1 :         toMeta := k.objKeyMeta(to)
     390           1 :         ts := k.nextMetaTimestamp()
     391           1 :         toMeta.MergeFrom(k.objKeyMeta(from), ts, k.comparer.Compare)
     392           1 : 
     393           1 :         delete(k.byObj, from)
     394           1 : }
     395             : 
     396             : // expandBounds expands the incrementally maintained bounds of o to be at least
     397             : // as wide as `b`.
     398           1 : func (k *keyManager) expandBounds(o objID, b bounds) {
     399           1 :         k.objKeyMeta(o).bounds.Expand(k.comparer.Compare, b)
     400           1 : }
     401             : 
     402             : // doObjectBoundsOverlap returns true iff any of the named objects have key
     403             : // bounds that overlap any other named object.
     404           1 : func (k *keyManager) doObjectBoundsOverlap(objIDs []objID) bool {
     405           1 :         for i := range objIDs {
     406           1 :                 ib, iok := k.byObj[objIDs[i]]
     407           1 :                 if !iok {
     408           1 :                         continue
     409             :                 }
     410           1 :                 for j := i + 1; j < len(objIDs); j++ {
     411           1 :                         jb, jok := k.byObj[objIDs[j]]
     412           1 :                         if !jok {
     413           1 :                                 continue
     414             :                         }
     415           1 :                         if ib.bounds.Overlaps(k.comparer.Compare, jb.bounds) {
     416           1 :                                 return true
     417           1 :                         }
     418             :                 }
     419             :         }
     420           1 :         return false
     421             : }
     422             : 
     423             : // checkForSingleDelConflicts examines all the keys written to srcObj, and
     424             : // determines whether any of the contained single deletes would be
     425             : // nondeterministic if applied to dstObj in dstObj's current state. It returns a
     426             : // slice of all the keys that are found to conflict. In order to preserve
     427             : // determinism, the caller must delete the key from the destination before
     428             : // writing src's mutations to dst in order to ensure determinism.
     429             : //
     430             : // It takes a `srcCollapsed` parameter that determines whether the source
     431             : // history should be "collapsed" (see keyHistory.collapsed) before determining
     432             : // whether the applied state will conflict. This is required to facilitate
     433             : // ingestOps which are NOT equivalent to committing the batch, because they can
     434             : // only commit 1 internal point key at each unique user key.
     435           1 : func (k *keyManager) checkForSingleDelConflicts(srcObj, dstObj objID, srcCollapsed bool) [][]byte {
     436           1 :         dstKeys := k.objKeyMeta(dstObj)
     437           1 :         var conflicts [][]byte
     438           1 :         for _, src := range k.SortedKeysForObj(srcObj) {
     439           1 :                 if srcCollapsed {
     440           1 :                         src.history = src.history.collapsed()
     441           1 :                 }
     442           1 :                 if k.checkForSingleDelConflict(src, dstKeys) {
     443           1 :                         conflicts = append(conflicts, src.key)
     444           1 :                 }
     445             :         }
     446           1 :         return conflicts
     447             : }
     448             : 
     449             : // checkForSingleDelConflict returns true if applying the history of the source
     450             : // key on top of the given object results in a possible SingleDel
     451             : // nondeterminism. See checkForSingleDelConflicts.
     452           1 : func (k *keyManager) checkForSingleDelConflict(src keyMeta, dstObjKeyMeta *objKeyMeta) bool {
     453           1 :         // Single delete generation logic already ensures that both the source
     454           1 :         // object and the destination object's single deletes are deterministic
     455           1 :         // within the context of their existing writes. However, applying the source
     456           1 :         // keys on top of the destination object may violate the invariants.
     457           1 :         // Consider:
     458           1 :         //
     459           1 :         //    src: a.SET; a.SINGLEDEL;
     460           1 :         //    dst: a.SET;
     461           1 :         //
     462           1 :         // The merged view is:
     463           1 :         //
     464           1 :         //    a.SET; a.SET; a.SINGLEDEL;
     465           1 :         //
     466           1 :         // This is invalid, because there is more than 1 value mutation of the
     467           1 :         // key before the single delete.
     468           1 :         //
     469           1 :         // We walk the source object's history in chronological order, looking
     470           1 :         // for a single delete that was written before a DEL/RANGEDEL. (NB: We
     471           1 :         // don't need to look beyond a DEL/RANGEDEL, because these deletes bound
     472           1 :         // any subsequently-written single deletes to applying to the keys
     473           1 :         // within src's history between the two tombstones. We already know from
     474           1 :         // per-object history invariants that any such single delete must be
     475           1 :         // deterministic with respect to src's keys.)
     476           1 :         var srcHasUnboundedSingleDelete bool
     477           1 :         var srcValuesBeforeSingleDelete int
     478           1 : 
     479           1 :         // When the srcObj is being ingested (srcCollapsed=t), the semantics
     480           1 :         // change. We must first "collapse" the key's history to represent the
     481           1 :         // ingestion semantics.
     482           1 :         srcHistory := src.history
     483           1 : 
     484           1 : srcloop:
     485           1 :         for _, item := range srcHistory {
     486           1 :                 switch item.opType {
     487           1 :                 case OpWriterDelete, OpWriterDeleteRange:
     488           1 :                         // We found a DEL or RANGEDEL before any single delete. If src
     489           1 :                         // contains additional single deletes, their effects are limited
     490           1 :                         // to applying to later keys. Combining the two object histories
     491           1 :                         // doesn't pose any determinism risk.
     492           1 :                         return false
     493             : 
     494           1 :                 case OpWriterSingleDelete:
     495           1 :                         // We found a single delete. Since we found this single delete
     496           1 :                         // before a DEL or RANGEDEL, this delete has the potential to
     497           1 :                         // affect the visibility of keys in `dstObj`. We'll need to look
     498           1 :                         // for potential conflicts down below.
     499           1 :                         srcHasUnboundedSingleDelete = true
     500           1 :                         if srcValuesBeforeSingleDelete > 1 {
     501           0 :                                 panic(errors.AssertionFailedf("unexpectedly found %d sets/merges before single del",
     502           0 :                                         srcValuesBeforeSingleDelete))
     503             :                         }
     504           1 :                         break srcloop
     505             : 
     506           1 :                 case OpWriterSet, OpWriterMerge:
     507           1 :                         // We found a SET or MERGE operation for this key. If there's a
     508           1 :                         // subsequent single delete, we'll need to make sure there's not
     509           1 :                         // a SET or MERGE in the dst too.
     510           1 :                         srcValuesBeforeSingleDelete++
     511             : 
     512           0 :                 default:
     513           0 :                         panic(errors.AssertionFailedf("unexpected optype %d", item.opType))
     514             :                 }
     515             :         }
     516           1 :         if !srcHasUnboundedSingleDelete {
     517           1 :                 return false
     518           1 :         }
     519             : 
     520           1 :         dst, ok := dstObjKeyMeta.keys[string(src.key)]
     521           1 :         // If the destination writer has no record of the key, the combined key
     522           1 :         // history is simply the src object's key history which is valid due to
     523           1 :         // per-object single deletion invariants.
     524           1 :         if !ok {
     525           1 :                 return false
     526           1 :         }
     527             : 
     528             :         // We need to examine the trailing key history on dst.
     529           1 :         consecutiveValues := srcValuesBeforeSingleDelete
     530           1 :         for i := len(dst.history) - 1; i >= 0; i-- {
     531           1 :                 switch dst.history[i].opType {
     532           1 :                 case OpWriterSet, OpWriterMerge:
     533           1 :                         // A SET/MERGE may conflict if there's more than 1 consecutive
     534           1 :                         // SET/MERGEs.
     535           1 :                         consecutiveValues++
     536           1 :                         if consecutiveValues > 1 {
     537           1 :                                 return true
     538           1 :                         }
     539           1 :                 case OpWriterDelete, OpWriterSingleDelete, OpWriterDeleteRange:
     540           1 :                         // Dels clear the history, enabling use of single delete.
     541           1 :                         return false
     542             : 
     543           0 :                 default:
     544           0 :                         panic(errors.AssertionFailedf("unexpected optype %d", dst.history[i].opType))
     545             :                 }
     546             :         }
     547           1 :         return false
     548             : }
     549             : 
// update updates the internal state of the keyManager according to the given
// op. It must be called for every generated (or replayed) operation so that
// per-object key histories, object bounds, and the hasRangeDels/hasRangeKeys
// flags stay in sync with what the operation does when executed. Operations
// not handled by any case below leave the key manager unchanged.
func (k *keyManager) update(o op) {
	switch s := o.(type) {
	case *setOp:
		// Append a SET to the key's history on the writer.
		meta := k.getOrInit(s.writerID, s.key)
		meta.history = append(meta.history, keyHistoryItem{
			opType:        OpWriterSet,
			metaTimestamp: k.nextMetaTimestamp(),
		})
	case *mergeOp:
		// Append a MERGE to the key's history on the writer.
		meta := k.getOrInit(s.writerID, s.key)
		meta.history = append(meta.history, keyHistoryItem{
			opType:        OpWriterMerge,
			metaTimestamp: k.nextMetaTimestamp(),
		})
	case *deleteOp:
		meta := k.getOrInit(s.writerID, s.key)
		if meta.objID.tag() == dbTag {
			// A delete applied directly to the DB wipes the key's
			// accumulated history rather than appending to it.
			meta.clear()
		} else {
			meta.history = append(meta.history, keyHistoryItem{
				opType:        OpWriterDelete,
				metaTimestamp: k.nextMetaTimestamp(),
			})
		}
	case *deleteRangeOp:
		// We track the history of discrete point keys, but a range deletion
		// applies over a continuous key span of infinite keys. However, the key
		// manager knows all keys that have been used in all operations, so we
		// can discretize the range tombstone by adding it to every known key
		// within the range.
		//
		// A single timestamp is drawn up front so that every discretized
		// entry shares the same logical time.
		ts := k.nextMetaTimestamp()
		keyRange := pebble.KeyRange{Start: s.start, End: s.end}
		for _, key := range k.knownKeysInRange(keyRange) {
			meta := k.getOrInit(s.writerID, key)
			if meta.objID.tag() == dbTag {
				// As with point deletes, deletes on the DB clear history.
				meta.clear()
			} else {
				meta.history = append(meta.history, keyHistoryItem{
					opType:        OpWriterDeleteRange,
					metaTimestamp: ts,
				})
			}
		}
		k.expandBounds(s.writerID, k.makeEndExclusiveBounds(s.start, s.end))
		k.objKeyMeta(s.writerID).hasRangeDels = true

	case *singleDeleteOp:
		// Append a SINGLEDEL to the key's history on the writer.
		meta := k.getOrInit(s.writerID, s.key)
		meta.history = append(meta.history, keyHistoryItem{
			opType:        OpWriterSingleDelete,
			metaTimestamp: k.nextMetaTimestamp(),
		})
	case *rangeKeyDeleteOp:
		// Range key operations on their own don't determine singledel eligibility,
		// however range key operations could be part of a batch which contains
		// other operations that do affect it. If those batches were to get
		// ingested, we'd need to know what the bounds of sstables generated out
		// of those batches are, as that determines whether that ingestion
		// will succeed or not.
		k.expandBounds(s.writerID, k.makeEndExclusiveBounds(s.start, s.end))
		k.objKeyMeta(s.writerID).hasRangeKeys = true
	case *rangeKeySetOp:
		// See the rangeKeyDeleteOp case: only bounds and the range-key flag
		// are tracked for range-key writes.
		k.expandBounds(s.writerID, k.makeEndExclusiveBounds(s.start, s.end))
		k.objKeyMeta(s.writerID).hasRangeKeys = true
	case *rangeKeyUnsetOp:
		// See the rangeKeyDeleteOp case: only bounds and the range-key flag
		// are tracked for range-key writes.
		k.expandBounds(s.writerID, k.makeEndExclusiveBounds(s.start, s.end))
		k.objKeyMeta(s.writerID).hasRangeKeys = true
	case *ingestOp:
		// Some ingestion operations may attempt to ingest overlapping sstables
		// which is prohibited. We know at generation time whether these
		// ingestions will be successful. If they won't be successful, we should
		// not update the key state because both the batch(es) and target DB
		// will be left unmodified.
		if k.doObjectBoundsOverlap(s.batchIDs) {
			// This ingestion will fail.
			return
		}

		// For each batch, merge the keys into the DB. We can't call
		// keyMeta.mergeInto directly to merge, because ingest operations first
		// "flatten" the batch (because you can't set the same key twice at a
		// single sequence number). Instead we compute the collapsed history and
		// merge that.
		for _, batchID := range s.batchIDs {
			k.objKeyMeta(batchID).CollapseKeys()
			k.mergeObjectInto(batchID, s.dbID)
		}
	case *ingestAndExciseOp:
		// IngestAndExcise does not ingest multiple batches, so we will not see
		// a failure due to overlapping sstables. However we do need to merge
		// the singular batch into the key manager.
		//
		// Remove all keys from the key manager within the excise span before
		// merging the batch into the db.
		for _, key := range k.InRangeKeysForObj(s.dbID, s.exciseStart, s.exciseEnd) {
			m := k.getOrInit(s.dbID, key.key)
			m.clear()
		}
		k.objKeyMeta(s.batchID).CollapseKeys()
		k.mergeObjectInto(s.batchID, s.dbID)
		// TODO(bilal): Handle replicateOp here. We currently disable SingleDelete
		// when these operations are enabled (see multiInstanceConfig).
	case *newExternalObjOp:
		// Collapse and transfer the keys from the batch to the external object.
		k.objKeyMeta(s.batchID).CollapseKeys()
		k.mergeObjectInto(s.batchID, s.externalObjID)
	case *ingestExternalFilesOp:
		// Merge the keys from the external objects (within the restricted bounds)
		// into the database. A single timestamp is used for all merged keys.
		ts := k.nextMetaTimestamp()
		dbMeta := k.objKeyMeta(s.dbID)
		for _, obj := range s.objs {
			for _, keyMeta := range k.KeysForExternalIngest(obj) {
				dbMeta.MergeKey(&keyMeta, ts)
			}
			dbMeta.bounds.Expand(k.comparer.Compare, k.makeEndExclusiveBounds(obj.bounds.Start, obj.bounds.End))
		}
	case *applyOp:
		// Merge the keys from this batch into the parent writer.
		k.mergeObjectInto(s.batchID, s.writerID)
	case *batchCommitOp:
		// Merge the keys from the batch with the keys from the DB.
		k.mergeObjectInto(s.batchID, s.dbID)
	}
}
     677             : 
     678           1 : func (k *keyManager) knownKeys() (keys [][]byte) {
     679           1 :         return k.globalKeys
     680           1 : }
     681             : 
     682             : // knownKeysInRange returns all eligible read keys within the range
     683             : // [start,end). The returned slice is owned by the keyManager and must not be
     684             : // retained.
     685           1 : func (k *keyManager) knownKeysInRange(kr pebble.KeyRange) (keys [][]byte) {
     686           1 :         s, _ := slices.BinarySearchFunc(k.globalKeys, kr.Start, k.comparer.Compare)
     687           1 :         e, _ := slices.BinarySearchFunc(k.globalKeys, kr.End, k.comparer.Compare)
     688           1 :         if s >= e {
     689           1 :                 return nil
     690           1 :         }
     691           1 :         return k.globalKeys[s:e]
     692             : }
     693             : 
     694           1 : func (k *keyManager) prefixes() (prefixes [][]byte) {
     695           1 :         return k.globalKeyPrefixes
     696           1 : }
     697             : 
     698             : // prefixExists returns true if a key has been generated with the provided
     699             : // prefix before.
     700           1 : func (k *keyManager) prefixExists(prefix []byte) bool {
     701           1 :         _, exists := k.globalKeyPrefixesMap[string(prefix)]
     702           1 :         return exists
     703           1 : }
     704             : 
     705             : // eligibleSingleDeleteKeys returns a slice of keys that can be safely single
     706             : // deleted, given the writer id. Restricting single delete keys through this
     707             : // method is used to ensure the OLW1 guarantee (see the keyManager comment) for
     708             : // the provided object ID.
     709           1 : func (k *keyManager) eligibleSingleDeleteKeys(o objID) (keys [][]byte) {
     710           1 :         // Creating a slice of keys is wasteful given that the caller will pick one,
     711           1 :         // but makes it simpler for unit testing.
     712           1 :         objKeys := k.objKeyMeta(o)
     713           1 :         for _, key := range k.globalKeys {
     714           1 :                 meta, ok := objKeys.keys[string(key)]
     715           1 :                 if !ok {
     716           1 :                         keys = append(keys, key)
     717           1 :                         continue
     718             :                 }
     719             :                 // Examine the history within this object.
     720           1 :                 if meta.history.canSingleDelete() {
     721           1 :                         keys = append(keys, key)
     722           1 :                 }
     723             :         }
     724           1 :         return keys
     725             : }
     726             : 
     727             : // makeSingleKeyBounds creates a [key, key] bound.
     728           1 : func (k *keyManager) makeSingleKeyBounds(key []byte) bounds {
     729           1 :         return bounds{
     730           1 :                 smallest:    key,
     731           1 :                 largest:     key,
     732           1 :                 largestExcl: false,
     733           1 :         }
     734           1 : }
     735             : 
     736             : // makeEndExclusiveBounds creates a [smallest, largest) bound.
     737           1 : func (k *keyManager) makeEndExclusiveBounds(smallest, largest []byte) bounds {
     738           1 :         b := bounds{
     739           1 :                 smallest:    smallest,
     740           1 :                 largest:     largest,
     741           1 :                 largestExcl: true,
     742           1 :         }
     743           1 :         b.checkValid(k.comparer.Compare)
     744           1 :         return b
     745           1 : }
     746             : 
// a keyHistoryItem describes an individual operation performed on a key.
type keyHistoryItem struct {
	// opType may be writerSet, writerDelete, writerSingleDelete,
	// writerDeleteRange or writerMerge only. No other opTypes may appear here.
	opType OpType
	// metaTimestamp is the logical time at which the operation was applied;
	// items within a keyHistory are ordered by non-decreasing metaTimestamp.
	metaTimestamp int
}
     754             : 
// keyHistory captures the history of mutations to a key in chronological
// order, i.e. ordered by non-decreasing metaTimestamp.
type keyHistory []keyHistoryItem
     757             : 
     758             : // before returns the subslice of the key history that happened strictly before
     759             : // the provided meta timestamp.
     760           1 : func (h keyHistory) before(metaTimestamp int) keyHistory {
     761           1 :         i, _ := slices.BinarySearchFunc(h, metaTimestamp, func(a keyHistoryItem, ts int) int {
     762           1 :                 return cmp.Compare(a.metaTimestamp, ts)
     763           1 :         })
     764           1 :         return h[:i]
     765             : }
     766             : 
     767             : // canSingleDelete examines the tail of the history and returns true if a single
     768             : // delete appended to this history would satisfy the single delete invariants.
     769           1 : func (h keyHistory) canSingleDelete() bool {
     770           1 :         if len(h) == 0 {
     771           1 :                 return true
     772           1 :         }
     773           1 :         switch o := h[len(h)-1].opType; o {
     774           1 :         case OpWriterDelete, OpWriterDeleteRange, OpWriterSingleDelete:
     775           1 :                 return true
     776           1 :         case OpWriterSet, OpWriterMerge:
     777           1 :                 if len(h) == 1 {
     778           1 :                         return true
     779           1 :                 }
     780           1 :                 return h[len(h)-2].opType.isDelete()
     781           0 :         default:
     782           0 :                 panic(errors.AssertionFailedf("unexpected writer op %v", o))
     783             :         }
     784             : }
     785             : 
     786           0 : func (h keyHistory) String() string {
     787           0 :         var sb strings.Builder
     788           0 :         for i, it := range h {
     789           0 :                 if i > 0 {
     790           0 :                         fmt.Fprint(&sb, ", ")
     791           0 :                 }
     792           0 :                 switch it.opType {
     793           0 :                 case OpWriterDelete:
     794           0 :                         fmt.Fprint(&sb, "del")
     795           0 :                 case OpWriterDeleteRange:
     796           0 :                         fmt.Fprint(&sb, "delrange")
     797           0 :                 case OpWriterSingleDelete:
     798           0 :                         fmt.Fprint(&sb, "singledel")
     799           0 :                 case OpWriterSet:
     800           0 :                         fmt.Fprint(&sb, "set")
     801           0 :                 case OpWriterMerge:
     802           0 :                         fmt.Fprint(&sb, "merge")
     803           0 :                 default:
     804           0 :                         fmt.Fprintf(&sb, "optype[v=%d]", it.opType)
     805             :                 }
     806           0 :                 fmt.Fprintf(&sb, "(%d)", it.metaTimestamp)
     807             :         }
     808           0 :         return sb.String()
     809             : }
     810             : 
     811             : // hasVisibleKey examines the tail of the history and returns true if the
     812             : // history should end in a visible value for this key.
     813           1 : func (h keyHistory) hasVisibleValue() bool {
     814           1 :         if len(h) == 0 {
     815           1 :                 return false
     816           1 :         }
     817           1 :         return !h[len(h)-1].opType.isDelete()
     818             : }
     819             : 
     820             : // collapsed returns a new key history that's equivalent to the history created
     821             : // by an ingestOp that "collapses" a batch's keys. See ingestOp.build.
     822           1 : func (h keyHistory) collapsed() keyHistory {
     823           1 :         var ret keyHistory
     824           1 :         // When collapsing a batch, any range deletes are semantically applied
     825           1 :         // first. Look for any range deletes and apply them.
     826           1 :         for _, op := range h {
     827           1 :                 if op.opType == OpWriterDeleteRange {
     828           1 :                         ret = append(ret, op)
     829           1 :                         break
     830             :                 }
     831             :         }
     832             :         // Among point keys, the most recently written key wins.
     833           1 :         for i := len(h) - 1; i >= 0; i-- {
     834           1 :                 if h[i].opType != OpWriterDeleteRange {
     835           1 :                         ret = append(ret, h[i])
     836           1 :                         break
     837             :                 }
     838             :         }
     839           1 :         return ret
     840             : }
     841             : 
     842           1 : func opWrittenKeys(untypedOp op) [][]byte {
     843           1 :         switch t := untypedOp.(type) {
     844           1 :         case *applyOp:
     845           1 :         case *batchCommitOp:
     846           1 :         case *checkpointOp:
     847           1 :         case *closeOp:
     848           1 :         case *compactOp:
     849           1 :         case *dbRestartOp:
     850           1 :         case *deleteOp:
     851           1 :                 return [][]byte{t.key}
     852           1 :         case *deleteRangeOp:
     853           1 :                 return [][]byte{t.start, t.end}
     854           1 :         case *flushOp:
     855           1 :         case *getOp:
     856           1 :         case *ingestOp:
     857           1 :         case *initOp:
     858           1 :         case *iterFirstOp:
     859           1 :         case *iterLastOp:
     860           1 :         case *iterNextOp:
     861           1 :         case *iterNextPrefixOp:
     862           1 :         case *iterCanSingleDelOp:
     863           1 :         case *iterPrevOp:
     864           1 :         case *iterSeekGEOp:
     865           1 :         case *iterSeekLTOp:
     866           1 :         case *iterSeekPrefixGEOp:
     867           1 :         case *iterSetBoundsOp:
     868           1 :         case *iterSetOptionsOp:
     869           1 :         case *mergeOp:
     870           1 :                 return [][]byte{t.key}
     871           1 :         case *newBatchOp:
     872           1 :         case *newIndexedBatchOp:
     873           1 :         case *newIterOp:
     874           1 :         case *newIterUsingCloneOp:
     875           1 :         case *newSnapshotOp:
     876           1 :         case *rangeKeyDeleteOp:
     877           1 :         case *rangeKeySetOp:
     878           1 :         case *rangeKeyUnsetOp:
     879           1 :         case *setOp:
     880           1 :                 return [][]byte{t.key}
     881           1 :         case *singleDeleteOp:
     882           1 :                 return [][]byte{t.key}
     883           1 :         case *replicateOp:
     884           1 :                 return [][]byte{t.start, t.end}
     885             :         }
     886           1 :         return nil
     887             : }
     888             : 
     889           1 : func loadPrecedingKeys(t TestingT, ops []op, cfg *OpConfig, m *keyManager) {
     890           1 :         for _, op := range ops {
     891           1 :                 // Pretend we're generating all the operation's keys as potential new
     892           1 :                 // key, so that we update the key manager's keys and prefix sets.
     893           1 :                 for _, k := range opWrittenKeys(op) {
     894           1 :                         m.addNewKey(k)
     895           1 : 
     896           1 :                         // If the key has a suffix, ratchet up the suffix distribution if
     897           1 :                         // necessary.
     898           1 :                         if s := m.comparer.Split(k); s < len(k) {
     899           1 :                                 suffix, err := testkeys.ParseSuffix(k[s:])
     900           1 :                                 require.NoError(t, err)
     901           1 :                                 if uint64(suffix) > cfg.writeSuffixDist.Max() {
     902           1 :                                         diff := int(uint64(suffix) - cfg.writeSuffixDist.Max())
     903           1 :                                         cfg.writeSuffixDist.IncMax(diff)
     904           1 :                                 }
     905             :                         }
     906             :                 }
     907             : 
     908             :                 // Update key tracking state.
     909           1 :                 m.update(op)
     910             :         }
     911             : }
     912             : 
     913           1 : func insertSorted(cmp base.Compare, dst *[][]byte, k []byte) {
     914           1 :         s := *dst
     915           1 :         i, _ := slices.BinarySearchFunc(s, k, cmp)
     916           1 :         *dst = slices.Insert(s, i, k)
     917           1 : }

Generated by: LCOV version 1.14