Line data Source code
1 : package metamorphic
2 :
3 : import (
4 : "cmp"
5 : "fmt"
6 : "slices"
7 :
8 : "github.com/cockroachdb/pebble"
9 : "github.com/cockroachdb/pebble/internal/base"
10 : "github.com/cockroachdb/pebble/internal/testkeys"
11 : "github.com/stretchr/testify/require"
12 : )
13 :
// objKey is a tuple of (objID, key). This struct is used primarily as a map
// key for keyManager. Only writer objTags can occur here, i.e., dbTag and
// batchTag, since this is used for tracking the keys in a writer.
type objKey struct {
	// id identifies the writer (DB or batch) in which the key was written.
	id objID
	// key is the user key being tracked within that writer.
	key []byte
}
21 :
22 : // makeObjKey returns a new objKey given and id and key.
23 0 : func makeObjKey(id objID, key []byte) objKey {
24 0 : if id.tag() != dbTag && id.tag() != batchTag {
25 0 : panic("unexpected non-writer tag")
26 : }
27 0 : return objKey{id, key}
28 : }
29 :
30 : // String implements fmt.Stringer, returning a stable string representation of
31 : // the objKey. This string is used as map key.
32 0 : func (o objKey) String() string {
33 0 : return fmt.Sprintf("%s:%s", o.id, o.key)
34 0 : }
35 :
// keyUpdate records a single write or delete of a key, used to build a
// best-effort history of updates applied to the key.
type keyUpdate struct {
	// deleted is true when the update removed the key (Delete/SingleDelete)
	// and false when it wrote the key (Set/Merge).
	deleted bool
	// metaTimestamp at which the write or delete op occurred.
	metaTimestamp int
}
41 :
// keyMeta is metadata associated with an (objID, key) pair, where objID is
// a writer containing the key.
type keyMeta struct {
	// objKey embeds the (writer, key) identity this metadata belongs to.
	objKey

	// The number of Sets of the key in this writer.
	sets int
	// The number of Merges of the key in this writer.
	merges int
	// singleDel can be true only if sets <= 1 && merges == 0 and the
	// SingleDelete was added to this writer after the set.
	singleDel bool
	// The number of Deletes of the key in this writer.
	dels int
	// del can be true only if a Delete was added to this writer after the
	// Sets and Merges counted above.
	del bool

	// updateOps should always be ordered by non-decreasing metaTimestamp.
	// updateOps will not be updated if the key is range deleted. Therefore, it
	// is a best effort sequence of updates to the key. updateOps is used to
	// determine if an iterator created on the DB can read a certain key.
	updateOps []keyUpdate
}
66 :
67 0 : func (m *keyMeta) clear() {
68 0 : m.sets = 0
69 0 : m.merges = 0
70 0 : m.singleDel = false
71 0 : m.del = false
72 0 : m.dels = 0
73 0 : m.updateOps = nil
74 0 : }
75 :
76 : // mergeInto merges this metadata this into the metadata for other.
77 0 : func (m *keyMeta) mergeInto(keyManager *keyManager, other *keyMeta) {
78 0 : if other.del && !m.del {
79 0 : // m's Sets and Merges are later.
80 0 : if m.sets > 0 || m.merges > 0 {
81 0 : other.del = false
82 0 : }
83 0 : } else {
84 0 : other.del = m.del
85 0 : }
86 : // Sets, merges, dels are additive.
87 0 : other.sets += m.sets
88 0 : other.merges += m.merges
89 0 : other.dels += m.dels
90 0 :
91 0 : // Single deletes are preserved. This is valid since we are also
92 0 : // maintaining a global invariant that SingleDelete will only be added for
93 0 : // a key that has no inflight Sets or Merges (Sets have made their way to
94 0 : // the DB), and no subsequent Sets or Merges will happen until the
95 0 : // SingleDelete makes its way to the DB.
96 0 : other.singleDel = other.singleDel || m.singleDel
97 0 : if other.singleDel {
98 0 : if other.sets > 1 || other.merges > 0 || other.dels > 0 {
99 0 : panic(fmt.Sprintf("invalid sets %d or merges %d or dels %d",
100 0 : other.sets, other.merges, other.dels))
101 : }
102 : }
103 :
104 : // Determine if the key is visible or not after the keyMetas are merged.
105 : // TODO(bananabrick): We currently only care about key updates which make it
106 : // to the DB, since we only use key updates to determine if an iterator
107 : // can read a key in the DB. We could extend the timestamp system to add
108 : // support for iterators created on batches.
109 0 : if other.del || other.singleDel {
110 0 : other.updateOps = append(
111 0 : other.updateOps, keyUpdate{true, keyManager.nextMetaTimestamp()},
112 0 : )
113 0 : } else {
114 0 : other.updateOps = append(
115 0 : other.updateOps, keyUpdate{false, keyManager.nextMetaTimestamp()},
116 0 : )
117 0 : }
118 : }
119 :
// keyManager tracks the write operations performed on keys in the generation
// phase of the metamorphic test. It makes the assumption that write
// operations do not fail, since that can cause the keyManager state to be not
// in-sync with the actual state of the writers. This assumption is needed to
// correctly decide when it is safe to generate a SingleDelete. This
// assumption is violated in a single place in the metamorphic test: ingestion
// of multiple batches. We sidestep this issue in a narrow way in
// generator.writerIngest by not ingesting multiple batches that contain
// deletes or single deletes, since loss of those specific operations on a key
// are what we cannot tolerate (doing SingleDelete on a key that has not been
// written to because the Set was lost is harmless).
type keyManager struct {
	// comparer supplies Compare (key ordering) and Split (prefix extraction).
	comparer *base.Comparer

	// metaTimestamp is used to provide a ordering over certain operations like
	// iter creation, updates to keys. Keeping track of the timestamp allows us
	// to make determinations such as whether a key will be visible to an
	// iterator.
	metaTimestamp int

	// byObjKey tracks the state for each (writer, key) pair. It refers to the
	// same *keyMeta as in the byObj slices. Using a map allows for fast state
	// lookups when changing the state based on a writer operation on the key.
	byObjKey map[string]*keyMeta
	// List of keys per writer, and what has happened to it in that writer.
	// Will be transferred when needed.
	byObj map[objID][]*keyMeta

	// globalKeys represents all the keys that have been generated so far. Not
	// all these keys have been written to. globalKeys is sorted.
	globalKeys [][]byte
	// globalKeysMap contains the same keys as globalKeys. It ensures no
	// duplication, and contains the aggregate state of the key across all
	// writers, including inflight state that has not made its way to the DB
	// yet. The keyMeta.objKey is uninitialized.
	globalKeysMap map[string]*keyMeta
	// globalKeyPrefixes contains all the key prefixes (as defined by the
	// comparer's Split) generated so far. globalKeyPrefixes is sorted.
	globalKeyPrefixes [][]byte
	// globalKeyPrefixesMap contains the same keys as globalKeyPrefixes. It
	// ensures no duplication.
	globalKeyPrefixesMap map[string]struct{}

	// Using SingleDeletes imposes some constraints on the above state, and
	// causes some state transitions that help with generating complex but
	// correct sequences involving SingleDeletes.
	// - Generating a SingleDelete requires for that key: global.merges==0 &&
	//   global.sets==1 && global.dels==0 && !global.singleDel && (db.sets==1
	//   || writer.sets==1), where global represents the entry in
	//   globalKeysMap[key] and db represents the entry in
	//   byObjKey[makeObjKey(makeObjID(dbTag, 0), key)], and writer is the
	//   entry in byObjKey[makeObjKey(writerID, key)].
	//
	// - We do not track state changes due to range deletes, so one should
	//   think of these counts as upper bounds. Also we are not preventing
	//   interactions caused by concurrently in-flight range deletes and
	//   SingleDelete. This is acceptable since it does not cause
	//   non-determinism.
	//
	// - When the SingleDelete is generated, it is recorded as
	//   writer.singleDel=true and global.singleDel=true. No more write
	//   operations are permitted on this key until db.singleDel transitions
	//   to true.
	//
	// - When db.singleDel transitions to true, we are guaranteed that no
	//   writer other than the DB has any writes for this key. We set
	//   db.singleDel and global.singleDel to false and the corresponding sets
	//   and merges counts in global and db also to 0. This allows this key to
	//   fully participate again in write operations. This means we can
	//   generate sequences of the form:
	//   SET => SINGLEDEL => SET* => MERGE* => DEL
	//   SET => SINGLEDEL => SET => SINGLEDEL, among others.
	//
	// - The above logic is insufficient to generate sequences of the form
	//   SET => DEL => SET => SINGLEDEL
	//   To do this we need to track Deletes. When db.del transitions to true,
	//   we check if db.sets==global.sets && db.merges==global.merges &&
	//   db.dels==global.dels. If true, there are no in-flight
	//   sets/merges/deletes to this key. We then default initialize the
	//   global and db entries since one can behave as if this key was never
	//   written in this system. This enables the above sequence, among
	//   others.
}
203 :
204 0 : func (k *keyManager) nextMetaTimestamp() int {
205 0 : ret := k.metaTimestamp
206 0 : k.metaTimestamp++
207 0 : return ret
208 0 : }
209 :
210 : // newKeyManager returns a pointer to a new keyManager. Callers should
211 : // interact with this using addNewKey, eligible*Keys, update,
212 : // canTolerateApplyFailure methods only.
213 0 : func newKeyManager(numInstances int) *keyManager {
214 0 : m := &keyManager{
215 0 : comparer: testkeys.Comparer,
216 0 : byObjKey: make(map[string]*keyMeta),
217 0 : byObj: make(map[objID][]*keyMeta),
218 0 : globalKeysMap: make(map[string]*keyMeta),
219 0 : globalKeyPrefixesMap: make(map[string]struct{}),
220 0 : }
221 0 : for i := 1; i <= max(numInstances, 1); i++ {
222 0 : m.byObj[makeObjID(dbTag, uint32(i))] = []*keyMeta{}
223 0 : }
224 0 : return m
225 : }
226 :
227 : // addNewKey adds the given key to the key manager for global key tracking.
228 : // Returns false iff this is not a new key.
229 0 : func (k *keyManager) addNewKey(key []byte) bool {
230 0 : _, ok := k.globalKeysMap[string(key)]
231 0 : if ok {
232 0 : return false
233 0 : }
234 0 : keyString := string(key)
235 0 : insertSorted(k.comparer.Compare, &k.globalKeys, key)
236 0 : k.globalKeysMap[keyString] = &keyMeta{objKey: objKey{key: key}}
237 0 :
238 0 : prefixLen := k.comparer.Split(key)
239 0 : if _, ok := k.globalKeyPrefixesMap[keyString[:prefixLen]]; !ok {
240 0 : insertSorted(k.comparer.Compare, &k.globalKeyPrefixes, key[:prefixLen])
241 0 : k.globalKeyPrefixesMap[keyString[:prefixLen]] = struct{}{}
242 0 : }
243 0 : return true
244 : }
245 :
246 : // getOrInit returns the keyMeta for the (objID, key) pair, if it exists, else
247 : // allocates, initializes and returns a new value.
248 0 : func (k *keyManager) getOrInit(id objID, key []byte) *keyMeta {
249 0 : o := makeObjKey(id, key)
250 0 : m, ok := k.byObjKey[o.String()]
251 0 : if ok {
252 0 : return m
253 0 : }
254 0 : m = &keyMeta{objKey: makeObjKey(id, key)}
255 0 : // Initialize the key-to-meta index.
256 0 : k.byObjKey[o.String()] = m
257 0 : // Add to the id-to-metas slide.
258 0 : k.byObj[o.id] = append(k.byObj[o.id], m)
259 0 : return m
260 : }
261 :
262 : // contains returns true if the (objID, key) pair is tracked by the keyManager.
263 0 : func (k *keyManager) contains(id objID, key []byte) bool {
264 0 : _, ok := k.byObjKey[makeObjKey(id, key).String()]
265 0 : return ok
266 0 : }
267 :
// mergeKeysInto merges all metadata for all keys associated with the "from" ID
// with the metadata for keys associated with the "to" ID.
//
// This is a standard merge of two sorted lists: both sides are sorted, then
// walked in lockstep, merging per-key metadata (via keyMeta.mergeInto) where
// the same key appears on both sides. All "from" entries are unlinked from
// byObjKey/byObj at the end.
func (k *keyManager) mergeKeysInto(from, to objID) {
	msFrom, ok := k.byObj[from]
	if !ok {
		msFrom = []*keyMeta{}
		k.byObj[from] = msFrom
	}

	msTo, ok := k.byObj[to]
	if !ok {
		msTo = []*keyMeta{}
		k.byObj[to] = msTo
	}

	// Sort to facilitate a merge. Note: entries within one byObj slice all
	// share the same objID, so sorting by the "id:key" String() orders them
	// by key (bytewise), matching the string comparisons in the loop below.
	slices.SortFunc(msFrom, func(a, b *keyMeta) int {
		return cmp.Compare(a.String(), b.String())
	})
	slices.SortFunc(msTo, func(a, b *keyMeta) int {
		return cmp.Compare(a.String(), b.String())
	})

	var msNew []*keyMeta
	var iTo int
	for _, m := range msFrom {
		// Move cursor on mTo forward.
		for iTo < len(msTo) && string(msTo[iTo].key) < string(m.key) {
			msNew = append(msNew, msTo[iTo])
			iTo++
		}

		var mTo *keyMeta
		if iTo < len(msTo) && string(msTo[iTo].key) == string(m.key) {
			// Key exists on both sides; merge into the existing "to" entry.
			mTo = msTo[iTo]
			iTo++
		} else {
			// Key only exists on the "from" side; create a fresh "to" entry.
			mTo = &keyMeta{objKey: makeObjKey(to, m.key)}
			k.byObjKey[mTo.String()] = mTo
		}

		m.mergeInto(k, mTo)
		msNew = append(msNew, mTo)

		delete(k.byObjKey, m.String()) // Unlink "from".
	}

	// Add any remaining items from the "to" set.
	for iTo < len(msTo) {
		msNew = append(msNew, msTo[iTo])
		iTo++
	}

	k.byObj[to] = msNew // Update "to".
	delete(k.byObj, from) // Unlink "from".
}
324 :
// checkForDelOrSingleDelTransition inspects the DB and global metadata for a
// key after a Delete or SingleDelete may have reached the DB, and resets both
// metas when the key can behave as if it was never written (see the state
// transition discussion on keyManager). dbMeta must be the DB's keyMeta and
// globalMeta the corresponding globalKeysMap entry for the same key.
func (k *keyManager) checkForDelOrSingleDelTransition(dbMeta *keyMeta, globalMeta *keyMeta) {
	if dbMeta.singleDel {
		// The SingleDelete made it to the DB. No other writer may have writes
		// for this key, so both metas must show exactly one Set and nothing
		// else.
		if !globalMeta.singleDel {
			panic("inconsistency with globalMeta")
		}
		if dbMeta.del || globalMeta.del || dbMeta.dels > 0 || globalMeta.dels > 0 ||
			dbMeta.merges > 0 || globalMeta.merges > 0 || dbMeta.sets != 1 || globalMeta.sets != 1 {
			panic("inconsistency in metas when SingleDelete applied to DB")
		}
		// Reset so the key can fully participate in writes again.
		dbMeta.clear()
		globalMeta.clear()
		return
	}
	// A Delete reached the DB and the DB counts match the global counts,
	// i.e., there are no in-flight sets/merges/deletes in other writers, so
	// the key can be treated as never written.
	if dbMeta.del && globalMeta.sets == dbMeta.sets && globalMeta.merges == dbMeta.merges &&
		globalMeta.dels == dbMeta.dels {
		if dbMeta.singleDel || globalMeta.singleDel {
			panic("Delete should not have happened given SingleDelete")
		}
		dbMeta.clear()
		globalMeta.clear()
	}
}
347 :
348 0 : func (k *keyManager) checkForDelOrSingleDelTransitionInDB(dbID objID) {
349 0 : keys := k.byObj[dbID]
350 0 : for _, dbMeta := range keys {
351 0 : globalMeta := k.globalKeysMap[string(dbMeta.key)]
352 0 : k.checkForDelOrSingleDelTransition(dbMeta, globalMeta)
353 0 : }
354 : }
355 :
// update updates the internal state of the keyManager according to the given
// op. Only write ops and ops that move keys between writers (apply, commit,
// ingest) change state; all other op types are ignored.
func (k *keyManager) update(o op) {
	switch s := o.(type) {
	case *setOp:
		meta := k.getOrInit(s.writerID, s.key)
		globalMeta := k.globalKeysMap[string(s.key)]
		meta.sets++ // Update the set count on this specific (id, key) pair.
		meta.del = false
		globalMeta.sets++
		meta.updateOps = append(meta.updateOps, keyUpdate{false, k.nextMetaTimestamp()})
		if meta.singleDel || globalMeta.singleDel {
			panic("setting a key that has in-flight SingleDelete")
		}
	case *mergeOp:
		meta := k.getOrInit(s.writerID, s.key)
		globalMeta := k.globalKeysMap[string(s.key)]
		meta.merges++
		meta.del = false
		globalMeta.merges++
		meta.updateOps = append(meta.updateOps, keyUpdate{false, k.nextMetaTimestamp()})
		if meta.singleDel || globalMeta.singleDel {
			panic("merging a key that has in-flight SingleDelete")
		}
	case *deleteOp:
		meta := k.getOrInit(s.writerID, s.key)
		globalMeta := k.globalKeysMap[string(s.key)]
		meta.del = true
		globalMeta.del = true
		meta.dels++
		globalMeta.dels++
		meta.updateOps = append(meta.updateOps, keyUpdate{true, k.nextMetaTimestamp()})
		// A Delete applied directly to the DB may allow the key's state to be
		// reset; see the keyManager comment.
		if s.writerID.tag() == dbTag {
			k.checkForDelOrSingleDelTransition(meta, globalMeta)
		}
	case *singleDeleteOp:
		if !k.globalStateIndicatesEligibleForSingleDelete(s.key) {
			panic("key ineligible for SingleDelete")
		}
		meta := k.getOrInit(s.writerID, s.key)
		globalMeta := k.globalKeysMap[string(s.key)]
		meta.singleDel = true
		globalMeta.singleDel = true
		meta.updateOps = append(meta.updateOps, keyUpdate{true, k.nextMetaTimestamp()})
		// A SingleDelete applied directly to the DB may complete the
		// singleDel state transition immediately.
		if s.writerID.tag() == dbTag {
			k.checkForDelOrSingleDelTransition(meta, globalMeta)
		}
	case *ingestOp:
		// For each batch, merge all keys with the keys in the DB.
		for _, batchID := range s.batchIDs {
			k.mergeKeysInto(batchID, s.dbID)
		}
		k.checkForDelOrSingleDelTransitionInDB(s.dbID)
	case *applyOp:
		// Merge the keys from this writer into the parent writer.
		k.mergeKeysInto(s.batchID, s.writerID)
		if s.writerID.tag() == dbTag {
			k.checkForDelOrSingleDelTransitionInDB(s.writerID)
		}
	case *batchCommitOp:
		// Merge the keys from the batch with the keys from the DB.
		k.mergeKeysInto(s.batchID, s.dbID)
		k.checkForDelOrSingleDelTransitionInDB(s.dbID)
	}
}
421 :
// eligibleReadKeys returns all keys generated so far, in sorted order. Not
// all of these keys have necessarily been written. The returned slice is
// owned by the keyManager and must not be mutated.
func (k *keyManager) eligibleReadKeys() (keys [][]byte) {
	return k.globalKeys
}
425 :
426 : // eligibleReadKeysInRange returns all eligible read keys within the range
427 : // [start,end). The returned slice is owned by the keyManager and must not be
428 : // retained.
429 0 : func (k *keyManager) eligibleReadKeysInRange(kr pebble.KeyRange) (keys [][]byte) {
430 0 : s, _ := slices.BinarySearchFunc(k.globalKeys, kr.Start, k.comparer.Compare)
431 0 : e, _ := slices.BinarySearchFunc(k.globalKeys, kr.End, k.comparer.Compare)
432 0 : if s >= e {
433 0 : return nil
434 0 : }
435 0 : return k.globalKeys[s:e]
436 : }
437 :
// prefixes returns all key prefixes (as defined by the comparer's Split)
// generated so far, in sorted order. The returned slice is owned by the
// keyManager and must not be mutated.
func (k *keyManager) prefixes() (prefixes [][]byte) {
	return k.globalKeyPrefixes
}
441 :
442 : // prefixExists returns true if a key has been generated with the provided
443 : // prefix before.
444 0 : func (k *keyManager) prefixExists(prefix []byte) bool {
445 0 : _, exists := k.globalKeyPrefixesMap[string(prefix)]
446 0 : return exists
447 0 : }
448 :
449 0 : func (k *keyManager) eligibleWriteKeys() (keys [][]byte) {
450 0 : // Creating and sorting this slice of keys is wasteful given that the
451 0 : // caller will pick one, but makes it simpler for unit testing.
452 0 : for _, v := range k.globalKeysMap {
453 0 : if v.singleDel {
454 0 : continue
455 : }
456 0 : keys = append(keys, v.key)
457 : }
458 0 : slices.SortFunc(keys, k.comparer.Compare)
459 0 : return keys
460 : }
461 :
462 : // eligibleSingleDeleteKeys returns a slice of keys that can be safely single
463 : // deleted, given the writer id.
464 0 : func (k *keyManager) eligibleSingleDeleteKeys(id, dbID objID) (keys [][]byte) {
465 0 : // Creating and sorting this slice of keys is wasteful given that the
466 0 : // caller will pick one, but makes it simpler for unit testing.
467 0 : addForObjID := func(id objID) {
468 0 : for _, m := range k.byObj[id] {
469 0 : if m.sets == 1 && k.globalStateIndicatesEligibleForSingleDelete(m.key) {
470 0 : keys = append(keys, m.key)
471 0 : }
472 : }
473 : }
474 0 : addForObjID(id)
475 0 : if id.tag() != dbTag {
476 0 : addForObjID(dbID)
477 0 : }
478 0 : slices.SortFunc(keys, k.comparer.Compare)
479 0 : return keys
480 : }
481 :
482 0 : func (k *keyManager) globalStateIndicatesEligibleForSingleDelete(key []byte) bool {
483 0 : m := k.globalKeysMap[string(key)]
484 0 : return m.merges == 0 && m.sets == 1 && m.dels == 0 && !m.singleDel
485 0 : }
486 :
487 : // canTolerateApplyFailure is called with a batch ID and returns true iff a
488 : // failure to apply this batch to the DB can be tolerated.
489 0 : func (k *keyManager) canTolerateApplyFailure(id objID) bool {
490 0 : if id.tag() != batchTag {
491 0 : panic("called with an objID that is not a batch")
492 : }
493 0 : ms, ok := k.byObj[id]
494 0 : if !ok {
495 0 : return true
496 0 : }
497 0 : for _, m := range ms {
498 0 : if m.singleDel || m.del {
499 0 : return false
500 0 : }
501 : }
502 0 : return true
503 : }
504 :
505 0 : func opWrittenKeys(untypedOp op) [][]byte {
506 0 : switch t := untypedOp.(type) {
507 0 : case *applyOp:
508 0 : case *batchCommitOp:
509 0 : case *checkpointOp:
510 0 : case *closeOp:
511 0 : case *compactOp:
512 0 : case *dbRestartOp:
513 0 : case *deleteOp:
514 0 : return [][]byte{t.key}
515 0 : case *deleteRangeOp:
516 0 : return [][]byte{t.start, t.end}
517 0 : case *flushOp:
518 0 : case *getOp:
519 0 : case *ingestOp:
520 0 : case *initOp:
521 0 : case *iterFirstOp:
522 0 : case *iterLastOp:
523 0 : case *iterNextOp:
524 0 : case *iterNextPrefixOp:
525 0 : case *iterPrevOp:
526 0 : case *iterSeekGEOp:
527 0 : case *iterSeekLTOp:
528 0 : case *iterSeekPrefixGEOp:
529 0 : case *iterSetBoundsOp:
530 0 : case *iterSetOptionsOp:
531 0 : case *mergeOp:
532 0 : return [][]byte{t.key}
533 0 : case *newBatchOp:
534 0 : case *newIndexedBatchOp:
535 0 : case *newIterOp:
536 0 : case *newIterUsingCloneOp:
537 0 : case *newSnapshotOp:
538 0 : case *rangeKeyDeleteOp:
539 0 : case *rangeKeySetOp:
540 0 : case *rangeKeyUnsetOp:
541 0 : case *setOp:
542 0 : return [][]byte{t.key}
543 0 : case *singleDeleteOp:
544 0 : return [][]byte{t.key}
545 0 : case *replicateOp:
546 0 : return [][]byte{t.start, t.end}
547 : }
548 0 : return nil
549 : }
550 :
551 0 : func loadPrecedingKeys(t TestingT, ops []op, cfg *config, m *keyManager) {
552 0 : for _, op := range ops {
553 0 : // Pretend we're generating all the operation's keys as potential new
554 0 : // key, so that we update the key manager's keys and prefix sets.
555 0 : for _, k := range opWrittenKeys(op) {
556 0 : m.addNewKey(k)
557 0 :
558 0 : // If the key has a suffix, ratchet up the suffix distribution if
559 0 : // necessary.
560 0 : if s := m.comparer.Split(k); s < len(k) {
561 0 : suffix, err := testkeys.ParseSuffix(k[s:])
562 0 : require.NoError(t, err)
563 0 : if uint64(suffix) > cfg.writeSuffixDist.Max() {
564 0 : diff := int(uint64(suffix) - cfg.writeSuffixDist.Max())
565 0 : cfg.writeSuffixDist.IncMax(diff)
566 0 : }
567 : }
568 : }
569 :
570 : // Update key tracking state.
571 0 : m.update(op)
572 : }
573 : }
574 :
575 0 : func insertSorted(cmp base.Compare, dst *[][]byte, k []byte) {
576 0 : s := *dst
577 0 : i, _ := slices.BinarySearchFunc(s, k, cmp)
578 0 : *dst = slices.Insert(s, i, k)
579 0 : }
|