LCOV - code coverage report
Current view: top level - pebble/metamorphic - key_manager.go (source / functions) Hit Total Coverage
Test: 2024-03-19 08:15Z 13bbeea1 - tests + meta.lcov Lines: 518 564 91.8 %
Date: 2024-03-19 08:17:12 Functions: 0 0 -

          Line data    Source code
       1             : // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
       2             : // of this source code is governed by a BSD-style license that can be found in
       3             : // the LICENSE file.
       4             : 
       5             : package metamorphic
       6             : 
       7             : import (
       8             :         "cmp"
       9             :         "fmt"
      10             :         "slices"
      11             :         "strings"
      12             : 
      13             :         "github.com/cockroachdb/errors"
      14             :         "github.com/cockroachdb/pebble"
      15             :         "github.com/cockroachdb/pebble/internal/base"
      16             :         "github.com/cockroachdb/pebble/internal/testkeys"
      17             :         "github.com/stretchr/testify/require"
      18             : )
      19             : 
// keyMeta is metadata associated with an (objID, key) pair, where objID is
// a writer containing the key.
type keyMeta struct {
        // objID identifies the writer object whose history of this key is
        // tracked here.
        objID objID
        // key is the user key this metadata describes.
        key   []byte
        // history provides the history of writer operations applied against this
        // key on this object. history is always ordered by non-decreasing
        // metaTimestamp.
        history keyHistory
}
      30             : 
      31           1 : func (m *keyMeta) clear() {
      32           1 :         m.history = m.history[:0]
      33           1 : }
      34             : 
// mergeInto merges this metadata into the metadata for dst, appending all of
// its individual operations to dst's history at the provided timestamp.
func (m *keyMeta) mergeInto(dst *keyMeta, ts int) {
        for _, op := range m.history {
                // If the key is being merged into a database object and the operation
                // is a delete, we can clear the destination history. Database objects
                // are end points in the merging of keys and won't be the source of a
                // future merge. Deletions cause all other operations to behave as
                // though the key was never written to the database at all, so we don't
                // need to consider it for maintaining single delete invariants.
                //
                // NB: There's a subtlety here in that isDelete() will return true if
                // opType is a writerSingleDelete, but single deletes are capable of
                // leaking information about the history of writes. However, that's
                // okay, because as long as we're properly generating single deletes
                // according to the W1 invariant described in keyManager's comment, a
                // single delete is equivalent to delete for the current history.
                if dst.objID.tag() == dbTag && op.opType.isDelete() {
                        dst.clear()
                        continue
                }
                // Re-stamp the copied operation with the merge timestamp so dst's
                // history stays ordered by non-decreasing metaTimestamp.
                dst.history = append(dst.history, keyHistoryItem{
                        opType:        op.opType,
                        metaTimestamp: ts,
                })
        }
}
      62             : 
// bounds describes an interval of user keys, inclusive of smallest and
// inclusive or exclusive of largest depending on largestExcl. The zero value
// (both endpoints nil) means "unset"; see IsUnset.
type bounds struct {
        smallest    []byte
        largest     []byte
        largestExcl bool // is largest exclusive?
}
      68             : 
      69           1 : func (b bounds) checkValid(cmp base.Compare) {
      70           1 :         if c := cmp(b.smallest, b.largest); c > 0 {
      71           0 :                 panic(fmt.Sprintf("invalid bound [%q, %q]", b.smallest, b.largest))
      72           1 :         } else if c == 0 && b.largestExcl {
      73           0 :                 panic(fmt.Sprintf("invalid bound [%q, %q)", b.smallest, b.largest))
      74             :         }
      75             : }
      76             : 
      77           1 : func (b bounds) String() string {
      78           1 :         if b.largestExcl {
      79           1 :                 return fmt.Sprintf("[%q,%q)", b.smallest, b.largest)
      80           1 :         }
      81           1 :         return fmt.Sprintf("[%q,%q]", b.smallest, b.largest)
      82             : }
      83             : 
      84             : // Overlaps returns true iff the bounds intersect.
      85           1 : func (b *bounds) Overlaps(cmp base.Compare, other bounds) bool {
      86           1 :         if b.IsUnset() || other.IsUnset() {
      87           1 :                 return false
      88           1 :         }
      89             :         // Is b strictly before other?
      90           1 :         if v := cmp(b.largest, other.smallest); v < 0 || (v == 0 && b.largestExcl) {
      91           1 :                 return false
      92           1 :         }
      93             :         // Is b strictly after other?
      94           1 :         if v := cmp(b.smallest, other.largest); v > 0 || (v == 0 && other.largestExcl) {
      95           1 :                 return false
      96           1 :         }
      97           1 :         return true
      98             : }
      99             : 
     100             : // IsUnset returns true if the bounds haven't been set.
     101           1 : func (b bounds) IsUnset() bool {
     102           1 :         return b.smallest == nil && b.largest == nil
     103           1 : }
     104             : 
     105             : // Expand potentially expands the receiver bounds to include the other given
     106             : // bounds. If the receiver is unset, the other bounds are copied.
     107           1 : func (b *bounds) Expand(cmp base.Compare, other bounds) {
     108           1 :         if other.IsUnset() {
     109           1 :                 return
     110           1 :         }
     111           1 :         other.checkValid(cmp)
     112           1 :         if b.IsUnset() {
     113           1 :                 *b = other
     114           1 :                 return
     115           1 :         }
     116           1 :         if cmp(b.smallest, other.smallest) > 0 {
     117           1 :                 b.smallest = other.smallest
     118           1 :         }
     119           1 :         if v := cmp(b.largest, other.largest); v < 0 || (v == 0 && b.largestExcl) {
     120           1 :                 b.largest = other.largest
     121           1 :                 b.largestExcl = other.largestExcl
     122           1 :         }
     123             : }
     124             : 
     125             : // keyManager tracks the write operations performed on keys in the generation
     126             : // phase of the metamorphic test. It maintains histories of operations performed
     127             : // against every unique user key on every writer object. These histories inform
     128             : // operation generation in order to maintain invariants that Pebble requires of
     129             : // end users, mostly around single deletions.
     130             : //
     131             : // A single deletion has a subtle requirement of the writer:
     132             : //
     133             : //      W1: The writer may only single delete a key `k` if `k` has been Set once
     134             : //          (and never MergeD) since the last delete.
     135             : //
     136             : // When a SINGLEDEL key deletes a SET key within a compaction, both the SET and
     137             : // the SINGLEDEL keys are elided. If multiple SETs of the key exist within the
     138             : // LSM, the SINGLEDEL reveals the lower SET. This behavior is dependent on the
     139             : // internal LSM state and nondeterministic. To ensure determinism, the end user
     140             : // must satisfy W1 and use single delete only when they can guarantee that the
     141             : // key has been set at most once since the last delete, preventing this rollback
     142             : // to a previous value.
     143             : //
     144             : // This W1 invariant requires a delicate dance during operation generation,
     145             : // because independent batches may be independently built and committed. With
     146             : // multi-instance variants of the metamorphic tests, keys in batches may
     147             : // ultimately be committed to any of several DB instances. To satisfy these
     148             : // requirements, the key manager tracks the history of every key on every
     149             : // writable object. When generating a new single deletion operation, the
     150             : // generator asks the key manager for a set of keys for which a single delete
     151             : // maintains the W1 invariant within the object itself. This object-local W1
     152             : // invariant (OLW1) is equivalent to W1 if one only ever performs write
     153             : // operations directly against individual DB objects.
     154             : //
     155             : // However with the existence of batches that receive writes independent of DB
     156             : // objects, W1 may be violated by appending the histories of two objects that
     157             : // independently satisfy OLW1. Consider a sequence such as:
     158             : //
     159             : //  1. db1.Set("foo")
     160             : //  2. batch1.Set("foo")
     161             : //  3. batch1.SingleDelete("foo")
     162             : //  4. db1.Apply(batch1)
     163             : //
     164             : // Both db1 and batch1 satisfy the object-local invariant OLW1. However the
     165             : // composition of the histories created by appending batch1's operations to db1
     166             : // creates a history that now violates W1 on db1. To detect this violation,
     167             : // batch applications/commits and ingestions examine the tail of the destination
     168             : // object's history and the head of the source batch's history. When a violation
     169             : // is detected, these operations insert additional Delete operations to clear
     170             : // the conflicting keys before proceeding with the conflicting operation. These
     171             : // deletes reset the key history.
     172             : //
     173             : // Note that this generation-time key tracking requires that operations be
     174             : // infallible, because a runtime failure would cause the key manager's state to
     175             : // diverge from the runtime object state. Ingestion operations pose an obstacle,
     176             : // because the generator may generate ingestions that fail due to overlapping
     177             : // sstables. Today, this complication is sidestepped by avoiding ingestion of
     178             : // multiple batches containing deletes or single deletes since loss of those
     179             : // specific operations on a key are what we cannot tolerate (doing SingleDelete
     180             : // on a key that has not been written to because the Set was lost is harmless).
     181             : //
     182             : // TODO(jackson): Instead, compute smallest and largest bounds of batches so
     183             : // that we know at generation-time whether or not an ingestion operation will
     184             : // fail and can avoid updating key state.
type keyManager struct {
        // comparer supplies the key ordering (Compare) and prefix extraction
        // (Split) used for all key bookkeeping.
        comparer *base.Comparer

        // metaTimestamp is used to provide a ordering over certain operations like
        // iter creation, updates to keys. Keeping track of the timestamp allows us
        // to make determinations such as whether a key will be visible to an
        // iterator.
        metaTimestamp int

        // byObj maps each writer object to the per-object key metadata tracked
        // for it; entries are created lazily by objKeyMeta.
        byObj map[objID]*objKeyMeta
        // globalKeys represents all the keys that have been generated so far. Not
        // all these keys have been written to. globalKeys is sorted.
        globalKeys [][]byte
        // globalKeysMap contains the same keys as globalKeys but in a map. It
        // ensures no duplication.
        globalKeysMap map[string]bool
        // globalKeyPrefixes contains all the key prefixes (as defined by the
        // comparer's Split) generated so far. globalKeyPrefixes is sorted.
        globalKeyPrefixes [][]byte
        // globalKeyPrefixesMap contains the same keys as globalKeyPrefixes. It
        // ensures no duplication.
        globalKeyPrefixesMap map[string]struct{}
}
     208             : 
// objKeyMeta aggregates the per-key metadata for a single writer object.
type objKeyMeta struct {
        // id identifies the object this metadata belongs to.
        id objID
        // List of keys, and what has happened to each in this object.
        // Will be transferred when needed.
        keys map[string]*keyMeta
        // bounds holds user key bounds encompassing all the keys set within an
        // object. It's updated within `update` when a new op is generated.
        bounds bounds
        // These flags are true if the object has had range del or range key operations.
        hasRangeDels bool
        hasRangeKeys bool
}
     221             : 
     222             : // MergeKey adds the given key at the given meta timestamp, merging the histories as needed.
     223           1 : func (okm *objKeyMeta) MergeKey(k *keyMeta, ts int) {
     224           1 :         meta, ok := okm.keys[string(k.key)]
     225           1 :         if !ok {
     226           1 :                 meta = &keyMeta{
     227           1 :                         objID: okm.id,
     228           1 :                         key:   k.key,
     229           1 :                 }
     230           1 :                 okm.keys[string(k.key)] = meta
     231           1 :         }
     232           1 :         k.mergeInto(meta, ts)
     233             : }
     234             : 
     235             : // CollapseKeys collapses the history of all keys. Used with ingestion operation
     236             : // which only use the last value of any given key.
     237           1 : func (okm *objKeyMeta) CollapseKeys() {
     238           1 :         for _, keyMeta := range okm.keys {
     239           1 :                 keyMeta.history = keyMeta.history.collapsed()
     240           1 :         }
     241             : }
     242             : 
     243             : // MergeFrom merges the `from` metadata into this one, appending all of its
     244             : // individual operations at the provided timestamp.
     245           1 : func (okm *objKeyMeta) MergeFrom(from *objKeyMeta, metaTimestamp int, cmp base.Compare) {
     246           1 :         // The result should be the same regardless of the ordering of the keys.
     247           1 :         for _, k := range from.keys {
     248           1 :                 okm.MergeKey(k, metaTimestamp)
     249           1 :         }
     250           1 :         okm.bounds.Expand(cmp, from.bounds)
     251           1 :         okm.hasRangeDels = okm.hasRangeDels || from.hasRangeDels
     252           1 :         okm.hasRangeKeys = okm.hasRangeKeys || from.hasRangeKeys
     253             : }
     254             : 
     255             : // objKeyMeta looks up the objKeyMeta for a given object, creating it if necessary.
     256           1 : func (k *keyManager) objKeyMeta(o objID) *objKeyMeta {
     257           1 :         m, ok := k.byObj[o]
     258           1 :         if !ok {
     259           1 :                 m = &objKeyMeta{
     260           1 :                         id:   o,
     261           1 :                         keys: make(map[string]*keyMeta),
     262           1 :                 }
     263           1 :                 k.byObj[o] = m
     264           1 :         }
     265           1 :         return m
     266             : }
     267             : 
     268             : // SortedKeysForObj returns all the entries in objKeyMeta(o).keys, in sorted
     269             : // order.
     270           1 : func (k *keyManager) SortedKeysForObj(o objID) []keyMeta {
     271           1 :         okm := k.objKeyMeta(o)
     272           1 :         res := make([]keyMeta, 0, len(okm.keys))
     273           1 :         for _, m := range okm.keys {
     274           1 :                 res = append(res, *m)
     275           1 :         }
     276           1 :         slices.SortFunc(res, func(a, b keyMeta) int {
     277           1 :                 cmp := k.comparer.Compare(a.key, b.key)
     278           1 :                 if cmp == 0 {
     279           0 :                         panic(fmt.Sprintf("distinct keys %q and %q compared as equal", a.key, b.key))
     280             :                 }
     281           1 :                 return cmp
     282             :         })
     283           1 :         return res
     284             : }
     285             : 
     286             : // InRangeKeysForObj returns all keys in the range [lower, upper) associated with the
     287             : // given object, in sorted order.
     288           1 : func (k *keyManager) InRangeKeysForObj(o objID, lower, upper []byte) []keyMeta {
     289           1 :         var inRangeKeys []keyMeta
     290           1 :         for _, km := range k.SortedKeysForObj(o) {
     291           0 :                 if k.comparer.Compare(km.key, lower) >= 0 && k.comparer.Compare(km.key, upper) < 0 {
     292           0 :                         inRangeKeys = append(inRangeKeys, km)
     293           0 :                 }
     294             :         }
     295           1 :         return inRangeKeys
     296             : 
     297             : }
     298             : 
// KeysForExternalIngest returns the keys that will be ingested with an external
// object (taking into consideration the bounds, synthetic suffix, etc).
func (k *keyManager) KeysForExternalIngest(obj externalObjWithBounds) []keyMeta {
        var res []keyMeta
        for _, km := range k.SortedKeysForObj(obj.externalObjID) {
                // Apply prefix and suffix changes, then check the bounds.
                if obj.syntheticPrefix.IsSet() {
                        km.key = obj.syntheticPrefix.Apply(km.key)
                }
                if obj.syntheticSuffix.IsSet() {
                        // Replace the key's suffix (everything past the comparer's
                        // Split point) with the synthetic suffix. The three-index
                        // slice caps capacity at n so the append allocates instead of
                        // scribbling over memory aliased by the stored key.
                        n := k.comparer.Split(km.key)
                        km.key = append(km.key[:n:n], obj.syntheticSuffix...)
                }
                // Only keys landing inside [Start, End) survive the ingestion.
                if k.comparer.Compare(km.key, obj.bounds.Start) >= 0 && k.comparer.Compare(km.key, obj.bounds.End) < 0 {
                        res = append(res, km)
                }
        }
        return res
}
     318             : 
     319           1 : func (k *keyManager) nextMetaTimestamp() int {
     320           1 :         ret := k.metaTimestamp
     321           1 :         k.metaTimestamp++
     322           1 :         return ret
     323           1 : }
     324             : 
     325             : // newKeyManager returns a pointer to a new keyManager. Callers should
     326             : // interact with this using addNewKey, knownKeys, update methods only.
     327           1 : func newKeyManager(numInstances int) *keyManager {
     328           1 :         m := &keyManager{
     329           1 :                 comparer:             testkeys.Comparer,
     330           1 :                 byObj:                make(map[objID]*objKeyMeta),
     331           1 :                 globalKeysMap:        make(map[string]bool),
     332           1 :                 globalKeyPrefixesMap: make(map[string]struct{}),
     333           1 :         }
     334           1 :         for i := 1; i <= max(numInstances, 1); i++ {
     335           1 :                 m.objKeyMeta(makeObjID(dbTag, uint32(i)))
     336           1 :         }
     337           1 :         return m
     338             : }
     339             : 
     340             : // addNewKey adds the given key to the key manager for global key tracking.
     341             : // Returns false iff this is not a new key.
     342           1 : func (k *keyManager) addNewKey(key []byte) bool {
     343           1 :         if k.globalKeysMap[string(key)] {
     344           1 :                 return false
     345           1 :         }
     346           1 :         insertSorted(k.comparer.Compare, &k.globalKeys, key)
     347           1 :         k.globalKeysMap[string(key)] = true
     348           1 : 
     349           1 :         prefixLen := k.comparer.Split(key)
     350           1 :         if prefixLen == 0 {
     351           0 :                 panic(fmt.Sprintf("key %q has zero length prefix", key))
     352             :         }
     353           1 :         if _, ok := k.globalKeyPrefixesMap[string(key[:prefixLen])]; !ok {
     354           1 :                 insertSorted(k.comparer.Compare, &k.globalKeyPrefixes, key[:prefixLen])
     355           1 :                 k.globalKeyPrefixesMap[string(key[:prefixLen])] = struct{}{}
     356           1 :         }
     357           1 :         return true
     358             : }
     359             : 
     360             : // getOrInit returns the keyMeta for the (objID, key) pair, if it exists, else
     361             : // allocates, initializes and returns a new value.
     362           1 : func (k *keyManager) getOrInit(id objID, key []byte) *keyMeta {
     363           1 :         objKeys := k.objKeyMeta(id)
     364           1 :         m, ok := objKeys.keys[string(key)]
     365           1 :         if ok {
     366           1 :                 return m
     367           1 :         }
     368           1 :         m = &keyMeta{
     369           1 :                 objID: id,
     370           1 :                 key:   key,
     371           1 :         }
     372           1 :         // Initialize the key-to-meta index.
     373           1 :         objKeys.keys[string(key)] = m
     374           1 :         // Expand the object's bounds to contain this key if they don't already.
     375           1 :         objKeys.bounds.Expand(k.comparer.Compare, k.makeSingleKeyBounds(key))
     376           1 :         return m
     377             : }
     378             : 
     379             : // mergeObjectInto merges obj key metadata from an object into another and
     380             : // deletes the metadata for the source object (which must not be used again).
     381           1 : func (k *keyManager) mergeObjectInto(from, to objID) {
     382           1 :         toMeta := k.objKeyMeta(to)
     383           1 :         ts := k.nextMetaTimestamp()
     384           1 :         toMeta.MergeFrom(k.objKeyMeta(from), ts, k.comparer.Compare)
     385           1 : 
     386           1 :         delete(k.byObj, from)
     387           1 : }
     388             : 
     389             : // expandBounds expands the incrementally maintained bounds of o to be at least
     390             : // as wide as `b`.
     391           1 : func (k *keyManager) expandBounds(o objID, b bounds) {
     392           1 :         k.objKeyMeta(o).bounds.Expand(k.comparer.Compare, b)
     393           1 : }
     394             : 
     395             : // doObjectBoundsOverlap returns true iff any of the named objects have key
     396             : // bounds that overlap any other named object.
     397           1 : func (k *keyManager) doObjectBoundsOverlap(objIDs []objID) bool {
     398           1 :         for i := range objIDs {
     399           1 :                 ib, iok := k.byObj[objIDs[i]]
     400           1 :                 if !iok {
     401           1 :                         continue
     402             :                 }
     403           1 :                 for j := i + 1; j < len(objIDs); j++ {
     404           1 :                         jb, jok := k.byObj[objIDs[j]]
     405           1 :                         if !jok {
     406           1 :                                 continue
     407             :                         }
     408           1 :                         if ib.bounds.Overlaps(k.comparer.Compare, jb.bounds) {
     409           1 :                                 return true
     410           1 :                         }
     411             :                 }
     412             :         }
     413           1 :         return false
     414             : }
     415             : 
     416             : // checkForSingleDelConflicts examines all the keys written to srcObj, and
     417             : // determines whether any of the contained single deletes would be
     418             : // nondeterministic if applied to dstObj in dstObj's current state. It returns a
     419             : // slice of all the keys that are found to conflict. In order to preserve
     420             : // determinism, the caller must delete the key from the destination before
     421             : // writing src's mutations to dst in order to ensure determinism.
     422             : //
     423             : // It takes a `srcCollapsed` parameter that determines whether the source
     424             : // history should be "collapsed" (see keyHistory.collapsed) before determining
     425             : // whether the applied state will conflict. This is required to facilitate
     426             : // ingestOps which are NOT equivalent to committing the batch, because they can
     427             : // only commit 1 internal point key at each unique user key.
     428           1 : func (k *keyManager) checkForSingleDelConflicts(srcObj, dstObj objID, srcCollapsed bool) [][]byte {
     429           1 :         dstKeys := k.objKeyMeta(dstObj)
     430           1 :         var conflicts [][]byte
     431           1 :         for _, src := range k.SortedKeysForObj(srcObj) {
     432           1 :                 if srcCollapsed {
     433           1 :                         src.history = src.history.collapsed()
     434           1 :                 }
     435           1 :                 if k.checkForSingleDelConflict(src, dstKeys) {
     436           1 :                         conflicts = append(conflicts, src.key)
     437           1 :                 }
     438             :         }
     439           1 :         return conflicts
     440             : }
     441             : 
     442             : // checkForSingleDelConflict returns true if applying the history of the source
     443             : // key on top of the given object results in a possible SingleDel
     444             : // nondeterminism. See checkForSingleDelConflicts.
     445           1 : func (k *keyManager) checkForSingleDelConflict(src keyMeta, dstObjKeyMeta *objKeyMeta) bool {
     446           1 :         // Single delete generation logic already ensures that both the source
     447           1 :         // object and the destination object's single deletes are deterministic
     448           1 :         // within the context of their existing writes. However, applying the source
     449           1 :         // keys on top of the destination object may violate the invariants.
     450           1 :         // Consider:
     451           1 :         //
     452           1 :         //    src: a.SET; a.SINGLEDEL;
     453           1 :         //    dst: a.SET;
     454           1 :         //
     455           1 :         // The merged view is:
     456           1 :         //
     457           1 :         //    a.SET; a.SET; a.SINGLEDEL;
     458           1 :         //
     459           1 :         // This is invalid, because there is more than 1 value mutation of the
     460           1 :         // key before the single delete.
     461           1 :         //
     462           1 :         // We walk the source object's history in chronological order, looking
     463           1 :         // for a single delete that was written before a DEL/RANGEDEL. (NB: We
     464           1 :         // don't need to look beyond a DEL/RANGEDEL, because these deletes bound
     465           1 :         // any subsequently-written single deletes to applying to the keys
     466           1 :         // within src's history between the two tombstones. We already know from
     467           1 :         // per-object history invariants that any such single delete must be
     468           1 :         // deterministic with respect to src's keys.)
     469           1 :         var srcHasUnboundedSingleDelete bool
     470           1 :         var srcValuesBeforeSingleDelete int
     471           1 : 
     472           1 :         // When the srcObj is being ingested (srcCollapsed=t), the semantics
     473           1 :         // change. We must first "collapse" the key's history to represent the
     474           1 :         // ingestion semantics.
     475           1 :         srcHistory := src.history
     476           1 : 
     477           1 : srcloop:
     478           1 :         for _, item := range srcHistory {
     479           1 :                 switch item.opType {
     480           1 :                 case OpWriterDelete, OpWriterDeleteRange:
     481           1 :                         // We found a DEL or RANGEDEL before any single delete. If src
     482           1 :                         // contains additional single deletes, their effects are limited
     483           1 :                         // to applying to later keys. Combining the two object histories
     484           1 :                         // doesn't pose any determinism risk.
     485           1 :                         return false
     486             : 
     487           1 :                 case OpWriterSingleDelete:
     488           1 :                         // We found a single delete. Since we found this single delete
     489           1 :                         // before a DEL or RANGEDEL, this delete has the potential to
     490           1 :                         // affect the visibility of keys in `dstObj`. We'll need to look
     491           1 :                         // for potential conflicts down below.
     492           1 :                         srcHasUnboundedSingleDelete = true
     493           1 :                         if srcValuesBeforeSingleDelete > 1 {
     494           0 :                                 panic(errors.AssertionFailedf("unexpectedly found %d sets/merges before single del",
     495           0 :                                         srcValuesBeforeSingleDelete))
     496             :                         }
     497           1 :                         break srcloop
     498             : 
     499           1 :                 case OpWriterSet, OpWriterMerge:
     500           1 :                         // We found a SET or MERGE operation for this key. If there's a
     501           1 :                         // subsequent single delete, we'll need to make sure there's not
     502           1 :                         // a SET or MERGE in the dst too.
     503           1 :                         srcValuesBeforeSingleDelete++
     504             : 
     505           0 :                 default:
     506           0 :                         panic(errors.AssertionFailedf("unexpected optype %d", item.opType))
     507             :                 }
     508             :         }
     509           1 :         if !srcHasUnboundedSingleDelete {
     510           1 :                 return false
     511           1 :         }
     512             : 
     513           1 :         dst, ok := dstObjKeyMeta.keys[string(src.key)]
     514           1 :         // If the destination writer has no record of the key, the combined key
     515           1 :         // history is simply the src object's key history which is valid due to
     516           1 :         // per-object single deletion invariants.
     517           1 :         if !ok {
     518           1 :                 return false
     519           1 :         }
     520             : 
     521             :         // We need to examine the trailing key history on dst.
     522           1 :         consecutiveValues := srcValuesBeforeSingleDelete
     523           1 :         for i := len(dst.history) - 1; i >= 0; i-- {
     524           1 :                 switch dst.history[i].opType {
     525           1 :                 case OpWriterSet, OpWriterMerge:
     526           1 :                         // A SET/MERGE may conflict if there's more than 1 consecutive
     527           1 :                         // SET/MERGEs.
     528           1 :                         consecutiveValues++
     529           1 :                         if consecutiveValues > 1 {
     530           1 :                                 return true
     531           1 :                         }
     532           1 :                 case OpWriterDelete, OpWriterSingleDelete, OpWriterDeleteRange:
     533           1 :                         // Dels clear the history, enabling use of single delete.
     534           1 :                         return false
     535             : 
     536           0 :                 default:
     537           0 :                         panic(errors.AssertionFailedf("unexpected optype %d", dst.history[i].opType))
     538             :                 }
     539             :         }
     540           1 :         return false
     541             : }
     542             : 
// update updates the internal state of the keyManager according to the given
// op. It must be called for every generated operation, in generation order,
// so that per-key histories and per-object bounds stay in sync with the ops.
func (k *keyManager) update(o op) {
	switch s := o.(type) {
	case *setOp:
		// Record the SET in the key's history on the destination writer.
		meta := k.getOrInit(s.writerID, s.key)
		meta.history = append(meta.history, keyHistoryItem{
			opType:        OpWriterSet,
			metaTimestamp: k.nextMetaTimestamp(),
		})
	case *mergeOp:
		// Record the MERGE in the key's history on the destination writer.
		meta := k.getOrInit(s.writerID, s.key)
		meta.history = append(meta.history, keyHistoryItem{
			opType:        OpWriterMerge,
			metaTimestamp: k.nextMetaTimestamp(),
		})
	case *deleteOp:
		meta := k.getOrInit(s.writerID, s.key)
		// A DEL applied directly to a DB clears the key's tracked history.
		// On any other writer (eg, a batch) the DEL is recorded so it can be
		// merged into the parent object's history later.
		if meta.objID.tag() == dbTag {
			meta.clear()
		} else {
			meta.history = append(meta.history, keyHistoryItem{
				opType:        OpWriterDelete,
				metaTimestamp: k.nextMetaTimestamp(),
			})
		}
	case *deleteRangeOp:
		// We track the history of discrete point keys, but a range deletion
		// applies over a continuous key span of infinite keys. However, the key
		// manager knows all keys that have been used in all operations, so we
		// can discretize the range tombstone by adding it to every known key
		// within the range.
		//
		// NB: a single timestamp is taken once so every discretized entry
		// shares the same metaTimestamp, mirroring the single rangedel.
		ts := k.nextMetaTimestamp()
		keyRange := pebble.KeyRange{Start: s.start, End: s.end}
		for _, key := range k.knownKeysInRange(keyRange) {
			meta := k.getOrInit(s.writerID, key)
			if meta.objID.tag() == dbTag {
				meta.clear()
			} else {
				meta.history = append(meta.history, keyHistoryItem{
					opType:        OpWriterDeleteRange,
					metaTimestamp: ts,
				})
			}
		}
		k.expandBounds(s.writerID, k.makeEndExclusiveBounds(s.start, s.end))
		k.objKeyMeta(s.writerID).hasRangeDels = true

	case *singleDeleteOp:
		// Record the SINGLEDEL; eligibility was determined at generation time
		// (see eligibleSingleDeleteKeys).
		meta := k.getOrInit(s.writerID, s.key)
		meta.history = append(meta.history, keyHistoryItem{
			opType:        OpWriterSingleDelete,
			metaTimestamp: k.nextMetaTimestamp(),
		})
	case *rangeKeyDeleteOp:
		// Range key operations on their own don't determine singledel eligibility,
		// however range key operations could be part of a batch which contains
		// other operations that do affect it. If those batches were to get
		// ingested, we'd need to know what the bounds of sstables generated out
		// of those batches are, as that determines whether that ingestion
		// will succeed or not.
		k.expandBounds(s.writerID, k.makeEndExclusiveBounds(s.start, s.end))
		k.objKeyMeta(s.writerID).hasRangeKeys = true
	case *rangeKeySetOp:
		k.expandBounds(s.writerID, k.makeEndExclusiveBounds(s.start, s.end))
		k.objKeyMeta(s.writerID).hasRangeKeys = true
	case *rangeKeyUnsetOp:
		k.expandBounds(s.writerID, k.makeEndExclusiveBounds(s.start, s.end))
		k.objKeyMeta(s.writerID).hasRangeKeys = true
	case *ingestOp:
		// Some ingestion operations may attempt to ingest overlapping sstables
		// which is prohibited. We know at generation time whether these
		// ingestions will be successful. If they won't be successful, we should
		// not update the key state because both the batch(es) and target DB
		// will be left unmodified.
		if k.doObjectBoundsOverlap(s.batchIDs) {
			// This ingestion will fail.
			return
		}

		// For each batch, merge the keys into the DB. We can't call
		// keyMeta.mergeInto directly to merge, because ingest operations first
		// "flatten" the batch (because you can't set the same key twice at a
		// single sequence number). Instead we compute the collapsed history and
		// merge that.
		for _, batchID := range s.batchIDs {
			k.objKeyMeta(batchID).CollapseKeys()
			k.mergeObjectInto(batchID, s.dbID)
		}
		// TODO(bilal): Handle ingestAndExciseOp and replicateOp here. We currently
		// disable SingleDelete when these operations are enabled (see
		// multiInstanceConfig).
	case *newExternalObjOp:
		// Collapse and transfer the keys from the batch to the external object.
		k.objKeyMeta(s.batchID).CollapseKeys()
		k.mergeObjectInto(s.batchID, s.externalObjID)
	case *ingestExternalFilesOp:
		// Merge the keys from the external objects (within the restricted bounds)
		// into the database.
		ts := k.nextMetaTimestamp()
		dbMeta := k.objKeyMeta(s.dbID)
		for _, obj := range s.objs {
			for _, keyMeta := range k.KeysForExternalIngest(obj) {
				dbMeta.MergeKey(&keyMeta, ts)
			}
			dbMeta.bounds.Expand(k.comparer.Compare, k.makeEndExclusiveBounds(obj.bounds.Start, obj.bounds.End))
		}
	case *applyOp:
		// Merge the keys from this batch into the parent writer.
		k.mergeObjectInto(s.batchID, s.writerID)
	case *batchCommitOp:
		// Merge the keys from the batch with the keys from the DB.
		k.mergeObjectInto(s.batchID, s.dbID)
	}
}
     658             : 
     659           1 : func (k *keyManager) knownKeys() (keys [][]byte) {
     660           1 :         return k.globalKeys
     661           1 : }
     662             : 
     663             : // knownKeysInRange returns all eligible read keys within the range
     664             : // [start,end). The returned slice is owned by the keyManager and must not be
     665             : // retained.
     666           1 : func (k *keyManager) knownKeysInRange(kr pebble.KeyRange) (keys [][]byte) {
     667           1 :         s, _ := slices.BinarySearchFunc(k.globalKeys, kr.Start, k.comparer.Compare)
     668           1 :         e, _ := slices.BinarySearchFunc(k.globalKeys, kr.End, k.comparer.Compare)
     669           1 :         if s >= e {
     670           1 :                 return nil
     671           1 :         }
     672           1 :         return k.globalKeys[s:e]
     673             : }
     674             : 
     675           1 : func (k *keyManager) prefixes() (prefixes [][]byte) {
     676           1 :         return k.globalKeyPrefixes
     677           1 : }
     678             : 
     679             : // prefixExists returns true if a key has been generated with the provided
     680             : // prefix before.
     681           1 : func (k *keyManager) prefixExists(prefix []byte) bool {
     682           1 :         _, exists := k.globalKeyPrefixesMap[string(prefix)]
     683           1 :         return exists
     684           1 : }
     685             : 
     686             : // eligibleSingleDeleteKeys returns a slice of keys that can be safely single
     687             : // deleted, given the writer id. Restricting single delete keys through this
     688             : // method is used to ensure the OLW1 guarantee (see the keyManager comment) for
     689             : // the provided object ID.
     690           1 : func (k *keyManager) eligibleSingleDeleteKeys(o objID) (keys [][]byte) {
     691           1 :         // Creating a slice of keys is wasteful given that the caller will pick one,
     692           1 :         // but makes it simpler for unit testing.
     693           1 :         objKeys := k.objKeyMeta(o)
     694           1 :         for _, key := range k.globalKeys {
     695           1 :                 meta, ok := objKeys.keys[string(key)]
     696           1 :                 if !ok {
     697           1 :                         keys = append(keys, key)
     698           1 :                         continue
     699             :                 }
     700             :                 // Examine the history within this object.
     701           1 :                 if meta.history.canSingleDelete() {
     702           1 :                         keys = append(keys, key)
     703           1 :                 }
     704             :         }
     705           1 :         return keys
     706             : }
     707             : 
     708             : // makeSingleKeyBounds creates a [key, key] bound.
     709           1 : func (k *keyManager) makeSingleKeyBounds(key []byte) bounds {
     710           1 :         return bounds{
     711           1 :                 smallest:    key,
     712           1 :                 largest:     key,
     713           1 :                 largestExcl: false,
     714           1 :         }
     715           1 : }
     716             : 
     717             : // makeEndExclusiveBounds creates a [smallest, largest) bound.
     718           1 : func (k *keyManager) makeEndExclusiveBounds(smallest, largest []byte) bounds {
     719           1 :         b := bounds{
     720           1 :                 smallest:    smallest,
     721           1 :                 largest:     largest,
     722           1 :                 largestExcl: true,
     723           1 :         }
     724           1 :         b.checkValid(k.comparer.Compare)
     725           1 :         return b
     726           1 : }
     727             : 
// a keyHistoryItem describes an individual operation performed on a key.
type keyHistoryItem struct {
	// opType may be writerSet, writerDelete, writerSingleDelete,
	// writerDeleteRange or writerMerge only. No other opTypes may appear here.
	opType        OpType
	// metaTimestamp is the logical time at which the operation was applied;
	// items within a keyHistory are ordered by non-decreasing metaTimestamp.
	metaTimestamp int
}
     735             : 
// keyHistory captures the history of mutations to a key in chronological
// order (non-decreasing metaTimestamp).
type keyHistory []keyHistoryItem
     738             : 
     739             : // before returns the subslice of the key history that happened strictly before
     740             : // the provided meta timestamp.
     741           0 : func (h keyHistory) before(metaTimestamp int) keyHistory {
     742           0 :         i, _ := slices.BinarySearchFunc(h, metaTimestamp, func(a keyHistoryItem, ts int) int {
     743           0 :                 return cmp.Compare(a.metaTimestamp, ts)
     744           0 :         })
     745           0 :         return h[:i]
     746             : }
     747             : 
     748             : // canSingleDelete examines the tail of the history and returns true if a single
     749             : // delete appended to this history would satisfy the single delete invariants.
     750           1 : func (h keyHistory) canSingleDelete() bool {
     751           1 :         if len(h) == 0 {
     752           1 :                 return true
     753           1 :         }
     754           1 :         switch o := h[len(h)-1].opType; o {
     755           1 :         case OpWriterDelete, OpWriterDeleteRange, OpWriterSingleDelete:
     756           1 :                 return true
     757           1 :         case OpWriterSet, OpWriterMerge:
     758           1 :                 if len(h) == 1 {
     759           1 :                         return true
     760           1 :                 }
     761           1 :                 return h[len(h)-2].opType.isDelete()
     762           0 :         default:
     763           0 :                 panic(errors.AssertionFailedf("unexpected writer op %v", o))
     764             :         }
     765             : }
     766             : 
     767           0 : func (h keyHistory) String() string {
     768           0 :         var sb strings.Builder
     769           0 :         for i, it := range h {
     770           0 :                 if i > 0 {
     771           0 :                         fmt.Fprint(&sb, ", ")
     772           0 :                 }
     773           0 :                 switch it.opType {
     774           0 :                 case OpWriterDelete:
     775           0 :                         fmt.Fprint(&sb, "del")
     776           0 :                 case OpWriterDeleteRange:
     777           0 :                         fmt.Fprint(&sb, "delrange")
     778           0 :                 case OpWriterSingleDelete:
     779           0 :                         fmt.Fprint(&sb, "singledel")
     780           0 :                 case OpWriterSet:
     781           0 :                         fmt.Fprint(&sb, "set")
     782           0 :                 case OpWriterMerge:
     783           0 :                         fmt.Fprint(&sb, "merge")
     784           0 :                 default:
     785           0 :                         fmt.Fprintf(&sb, "optype[v=%d]", it.opType)
     786             :                 }
     787           0 :                 fmt.Fprintf(&sb, "(%d)", it.metaTimestamp)
     788             :         }
     789           0 :         return sb.String()
     790             : }
     791             : 
     792             : // hasVisibleKey examines the tail of the history and returns true if the
     793             : // history should end in a visible value for this key.
     794           0 : func (h keyHistory) hasVisibleValue() bool {
     795           0 :         if len(h) == 0 {
     796           0 :                 return false
     797           0 :         }
     798           0 :         return !h[len(h)-1].opType.isDelete()
     799             : }
     800             : 
     801             : // collapsed returns a new key history that's equivalent to the history created
     802             : // by an ingestOp that "collapses" a batch's keys. See ingestOp.build.
     803           1 : func (h keyHistory) collapsed() keyHistory {
     804           1 :         var ret keyHistory
     805           1 :         // When collapsing a batch, any range deletes are semantically applied
     806           1 :         // first. Look for any range deletes and apply them.
     807           1 :         for _, op := range h {
     808           1 :                 if op.opType == OpWriterDeleteRange {
     809           1 :                         ret = append(ret, op)
     810           1 :                         break
     811             :                 }
     812             :         }
     813             :         // Among point keys, the most recently written key wins.
     814           1 :         for i := len(h) - 1; i >= 0; i-- {
     815           1 :                 if h[i].opType != OpWriterDeleteRange {
     816           1 :                         ret = append(ret, h[i])
     817           1 :                         break
     818             :                 }
     819             :         }
     820           1 :         return ret
     821             : }
     822             : 
     823           1 : func opWrittenKeys(untypedOp op) [][]byte {
     824           1 :         switch t := untypedOp.(type) {
     825           1 :         case *applyOp:
     826           1 :         case *batchCommitOp:
     827           1 :         case *checkpointOp:
     828           1 :         case *closeOp:
     829           1 :         case *compactOp:
     830           1 :         case *dbRestartOp:
     831           1 :         case *deleteOp:
     832           1 :                 return [][]byte{t.key}
     833           1 :         case *deleteRangeOp:
     834           1 :                 return [][]byte{t.start, t.end}
     835           1 :         case *flushOp:
     836           1 :         case *getOp:
     837           1 :         case *ingestOp:
     838           1 :         case *initOp:
     839           1 :         case *iterFirstOp:
     840           1 :         case *iterLastOp:
     841           1 :         case *iterNextOp:
     842           1 :         case *iterNextPrefixOp:
     843           1 :         case *iterCanSingleDelOp:
     844           1 :         case *iterPrevOp:
     845           1 :         case *iterSeekGEOp:
     846           1 :         case *iterSeekLTOp:
     847           1 :         case *iterSeekPrefixGEOp:
     848           1 :         case *iterSetBoundsOp:
     849           1 :         case *iterSetOptionsOp:
     850           1 :         case *mergeOp:
     851           1 :                 return [][]byte{t.key}
     852           1 :         case *newBatchOp:
     853           1 :         case *newIndexedBatchOp:
     854           1 :         case *newIterOp:
     855           1 :         case *newIterUsingCloneOp:
     856           1 :         case *newSnapshotOp:
     857           1 :         case *rangeKeyDeleteOp:
     858           1 :         case *rangeKeySetOp:
     859           1 :         case *rangeKeyUnsetOp:
     860           1 :         case *setOp:
     861           1 :                 return [][]byte{t.key}
     862           1 :         case *singleDeleteOp:
     863           1 :                 return [][]byte{t.key}
     864           1 :         case *replicateOp:
     865           1 :                 return [][]byte{t.start, t.end}
     866             :         }
     867           1 :         return nil
     868             : }
     869             : 
     870           1 : func loadPrecedingKeys(t TestingT, ops []op, cfg *OpConfig, m *keyManager) {
     871           1 :         for _, op := range ops {
     872           1 :                 // Pretend we're generating all the operation's keys as potential new
     873           1 :                 // key, so that we update the key manager's keys and prefix sets.
     874           1 :                 for _, k := range opWrittenKeys(op) {
     875           1 :                         m.addNewKey(k)
     876           1 : 
     877           1 :                         // If the key has a suffix, ratchet up the suffix distribution if
     878           1 :                         // necessary.
     879           1 :                         if s := m.comparer.Split(k); s < len(k) {
     880           1 :                                 suffix, err := testkeys.ParseSuffix(k[s:])
     881           1 :                                 require.NoError(t, err)
     882           1 :                                 if uint64(suffix) > cfg.writeSuffixDist.Max() {
     883           1 :                                         diff := int(uint64(suffix) - cfg.writeSuffixDist.Max())
     884           1 :                                         cfg.writeSuffixDist.IncMax(diff)
     885           1 :                                 }
     886             :                         }
     887             :                 }
     888             : 
     889             :                 // Update key tracking state.
     890           1 :                 m.update(op)
     891             :         }
     892             : }
     893             : 
     894           1 : func insertSorted(cmp base.Compare, dst *[][]byte, k []byte) {
     895           1 :         s := *dst
     896           1 :         i, _ := slices.BinarySearchFunc(s, k, cmp)
     897           1 :         *dst = slices.Insert(s, i, k)
     898           1 : }

Generated by: LCOV version 1.14