Line data Source code
1 : // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package metamorphic
6 :
7 : import (
8 : "context"
9 : "fmt"
10 : "io"
11 : "os"
12 : "path"
13 : "path/filepath"
14 : "sort"
15 : "strings"
16 : "time"
17 :
18 : "github.com/cockroachdb/errors"
19 : "github.com/cockroachdb/pebble"
20 : "github.com/cockroachdb/pebble/objstorage/remote"
21 : "github.com/cockroachdb/pebble/sstable"
22 : "github.com/cockroachdb/pebble/vfs"
23 : "github.com/cockroachdb/pebble/vfs/errorfs"
24 : )
25 :
26 : // New constructs a new metamorphic test that runs the provided operations
27 : // against a database using the provided TestOptions and outputs the history of
28 : // events to an io.Writer.
29 : //
30 : // dir specifies the path within opts.Opts.FS at which to open the database.
31 1 : func New(ops Ops, opts *TestOptions, dir string, w io.Writer) (*Test, error) {
32 1 : t := newTest(ops)
33 1 : h := newHistory(nil /* failRegexp */, w)
34 1 : if err := t.init(h, dir, opts, 1 /* numInstances */, 0 /* opTimeout */); err != nil {
35 0 : return nil, err
36 0 : }
37 1 : return t, nil
38 : }
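// Illustrative usage sketch (not part of this file): a caller outside the
// package that already has a generated Ops slice and a TestOptions (both
// assumed to be produced elsewhere in this package) could construct a Test
// with New and drive it one operation at a time with Step:
//
//	t, err := metamorphic.New(ops, opts, "/tmp/meta-run" /* dir */, os.Stdout)
//	if err != nil {
//		log.Fatal(err)
//	}
//	for {
//		more, output, err := t.Step()
//		if err != nil {
//			log.Fatal(err)
//		}
//		_ = output // per-op output; the full history also goes to the io.Writer
//		if !more {
//			break
//		}
//	}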
39 :
40 : // A Test configures an individual test run consisting of a set of operations,
41 : // TestOptions configuring the target database to which the operations should be
42 : // applied, and a sink for outputting test history.
43 : type Test struct {
44 : // The list of ops to execute. The ops refer to slots in the batches, iters,
45 : // and snapshots slices.
46 : ops []op
47 : opsWaitOn [][]int // op index -> op indexes
48 : opsDone []chan struct{} // op index -> done channel
49 : idx int
50 : dir string
51 : h *history
52 : opTimeout time.Duration
53 : opts *pebble.Options
54 : testOpts *TestOptions
55 : writeOpts *pebble.WriteOptions
56 : tmpDir string
57 : // The DBs the test is run on.
58 : dbs []*pebble.DB
59 : // The slots for the batches, iterators, and snapshots. These are read and
60 : // written by the ops to pass state from one op to another.
61 : batches []*pebble.Batch
62 : iters []*retryableIter
63 : snapshots []readerCloser
64 : externalObjs []externalObjMeta
65 :
66 : // externalStorage is used to write external objects. If external storage is
67 : 	// enabled, this is the same as testOpts.externalStorageFS; otherwise, this
68 : // is an in-memory implementation used only by the test.
69 : externalStorage remote.Storage
70 : }
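// Illustrative sketch of how the slots above are used (hypotheticalNewBatchOp
// and its dbID/batchID fields are invented for illustration; real op
// implementations live elsewhere in this package): an op that creates a batch
// stores it under its objID's slot, and later ops fetch it by the same ID via
// the setBatch/getBatch helpers defined below.
//
//	func (o *hypotheticalNewBatchOp) run(t *Test, h historyRecorder) {
//		b := t.getDB(o.dbID).NewBatch()
//		t.setBatch(o.batchID, b)
//	}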
71 :
72 : type externalObjMeta struct {
73 : sstMeta *sstable.WriterMetadata
74 : }
75 :
76 2 : func newTest(ops []op) *Test {
77 2 : return &Test{
78 2 : ops: ops,
79 2 : }
80 2 : }
81 :
82 : func (t *Test) init(
83 : h *history, dir string, testOpts *TestOptions, numInstances int, opTimeout time.Duration,
84 2 : ) error {
85 2 : t.dir = dir
86 2 : t.h = h
87 2 : t.opTimeout = opTimeout
88 2 : t.testOpts = testOpts
89 2 : t.writeOpts = pebble.NoSync
90 2 : if testOpts.strictFS {
91 2 : t.writeOpts = pebble.Sync
92 2 : } else {
93 2 : t.writeOpts = pebble.NoSync
94 2 : }
95 2 : testOpts.Opts.WithFSDefaults()
96 2 : t.opts = testOpts.Opts.EnsureDefaults()
97 2 : t.opts.Logger = h
98 2 : lel := pebble.MakeLoggingEventListener(t.opts.Logger)
99 2 : t.opts.EventListener = &lel
100 2 : 	// If the test options set a DebugCheck func, wrap it so that retriable
101 2 : 	// errors are retried (according to the test's retry policy).
102 2 : if debugCheck := t.opts.DebugCheck; debugCheck != nil {
103 2 : t.opts.DebugCheck = func(db *pebble.DB) error {
104 2 : return t.withRetries(func() error { return debugCheck(db) })
105 : }
106 : }
107 2 : if numInstances < 1 {
108 2 : numInstances = 1
109 2 : }
110 2 : if t.testOpts.externalStorageEnabled {
111 2 : t.externalStorage = t.testOpts.externalStorageFS
112 2 : } else {
113 2 : t.externalStorage = remote.NewInMem()
114 2 : }
115 :
116 2 : t.opsWaitOn, t.opsDone = computeSynchronizationPoints(t.ops)
117 2 :
118 2 : defer t.opts.Cache.Unref()
119 2 :
120 2 : // If an error occurs and we were using an in-memory FS, attempt to clone to
121 2 : // on-disk in order to allow post-mortem debugging. Note that always using
122 2 : // the on-disk FS isn't desirable because there is a large performance
123 2 : // difference between in-memory and on-disk which causes different code paths
124 2 : // and timings to be exercised.
125 2 : maybeExit := func(err error) {
126 2 : if err == nil || errors.Is(err, errorfs.ErrInjected) || errors.Is(err, pebble.ErrCancelledCompaction) {
127 2 : return
128 2 : }
129 0 : t.saveInMemoryData()
130 0 : fmt.Fprintln(os.Stderr, err)
131 0 : os.Exit(1)
132 : }
133 :
134 : // Exit early on any error from a background operation.
135 2 : t.opts.EventListener.BackgroundError = func(err error) {
136 2 : t.opts.Logger.Infof("background error: %s", err)
137 2 : maybeExit(err)
138 2 : }
139 2 : t.opts.EventListener.CompactionEnd = func(info pebble.CompactionInfo) {
140 2 : t.opts.Logger.Infof("%s", info)
141 2 : maybeExit(info.Err)
142 2 : }
143 2 : t.opts.EventListener.FlushEnd = func(info pebble.FlushInfo) {
144 2 : t.opts.Logger.Infof("%s", info)
145 2 : if info.Err != nil && !strings.Contains(info.Err.Error(), "pebble: empty table") {
146 0 : maybeExit(info.Err)
147 0 : }
148 : }
149 2 : t.opts.EventListener.DownloadEnd = func(info pebble.DownloadInfo) {
150 2 : t.opts.Logger.Infof("%s", info)
151 2 : maybeExit(info.Err)
152 2 : }
153 2 : t.opts.EventListener.ManifestCreated = func(info pebble.ManifestCreateInfo) {
154 2 : t.opts.Logger.Infof("%s", info)
155 2 : maybeExit(info.Err)
156 2 : }
157 2 : t.opts.EventListener.ManifestDeleted = func(info pebble.ManifestDeleteInfo) {
158 2 : t.opts.Logger.Infof("%s", info)
159 2 : maybeExit(info.Err)
160 2 : }
161 2 : t.opts.EventListener.TableDeleted = func(info pebble.TableDeleteInfo) {
162 2 : t.opts.Logger.Infof("%s", info)
163 2 : maybeExit(info.Err)
164 2 : }
165 2 : t.opts.EventListener.TableIngested = func(info pebble.TableIngestInfo) {
166 2 : t.opts.Logger.Infof("%s", info)
167 2 : maybeExit(info.Err)
168 2 : }
169 2 : t.opts.EventListener.WALCreated = func(info pebble.WALCreateInfo) {
170 2 : t.opts.Logger.Infof("%s", info)
171 2 : maybeExit(info.Err)
172 2 : }
173 2 : t.opts.EventListener.WALDeleted = func(info pebble.WALDeleteInfo) {
174 2 : t.opts.Logger.Infof("%s", info)
175 2 : maybeExit(info.Err)
176 2 : }
177 :
178 2 : for i := range t.testOpts.CustomOpts {
179 0 : if err := t.testOpts.CustomOpts[i].Open(t.opts); err != nil {
180 0 : return err
181 0 : }
182 : }
183 :
184 2 : t.dbs = make([]*pebble.DB, numInstances)
185 2 : for i := range t.dbs {
186 2 : var db *pebble.DB
187 2 : var err error
188 2 : if len(t.dbs) > 1 {
189 1 : dir = path.Join(t.dir, fmt.Sprintf("db%d", i+1))
190 1 : }
191 2 : err = t.withRetries(func() error {
192 2 : db, err = pebble.Open(dir, t.opts)
193 2 : return err
194 2 : })
195 2 : if err != nil {
196 0 : return err
197 0 : }
198 2 : t.dbs[i] = db
199 2 : h.log.Printf("// db%d.Open() %v", i+1, err)
200 2 :
201 2 : if t.testOpts.sharedStorageEnabled {
202 2 : err = t.withRetries(func() error {
203 2 : return db.SetCreatorID(uint64(i + 1))
204 2 : })
205 2 : if err != nil {
206 0 : return err
207 0 : }
208 2 : h.log.Printf("// db%d.SetCreatorID() %v", i+1, err)
209 : }
210 : }
211 :
212 2 : var err error
213 2 : t.tmpDir = t.opts.FS.PathJoin(t.dir, "tmp")
214 2 : if err = t.opts.FS.MkdirAll(t.tmpDir, 0755); err != nil {
215 0 : return err
216 0 : }
217 2 : if t.testOpts.strictFS {
218 2 : 		// Sync the whole directory path for the tmpDir, since restartDB() is executed during
219 2 : 		// the test. restartDB() resets MemFS to the synced state, which would make an unsynced
220 2 : 		// directory disappear in the middle of the test. It is the responsibility of the test
221 2 : 		// (not Pebble) to ensure that it can write the ssts that it will subsequently ingest
222 2 : 		// into Pebble.
223 2 : for {
224 2 : f, err := t.opts.FS.OpenDir(dir)
225 2 : if err != nil {
226 0 : return err
227 0 : }
228 2 : if err = f.Sync(); err != nil {
229 0 : return err
230 0 : }
231 2 : if err = f.Close(); err != nil {
232 0 : return err
233 0 : }
234 2 : if len(dir) == 1 {
235 2 : break
236 : }
237 2 : dir = t.opts.FS.PathDir(dir)
238 2 : // TODO(sbhola): PathDir returns ".", which OpenDir() complains about. Fix.
239 2 : if len(dir) == 1 {
240 2 : dir = "/"
241 2 : }
242 : }
243 : }
244 :
245 2 : return nil
246 : }
247 :
248 2 : func (t *Test) withRetries(fn func() error) error {
249 2 : return withRetries(fn, t.testOpts.RetryPolicy)
250 2 : }
251 :
252 2 : func (t *Test) isFMV(dbID objID, fmv pebble.FormatMajorVersion) bool {
253 2 : db := t.getDB(dbID)
254 2 : return db.FormatMajorVersion() >= fmv
255 2 : }
256 :
257 : // minFMV returns the minimum FormatMajorVersion between all databases.
258 2 : func (t *Test) minFMV() pebble.FormatMajorVersion {
259 2 : minVersion := pebble.FormatNewest
260 2 : for _, db := range t.dbs {
261 2 : if db != nil {
262 2 : minVersion = min(minVersion, db.FormatMajorVersion())
263 2 : }
264 : }
265 2 : return minVersion
266 : }
267 :
268 2 : func (t *Test) restartDB(dbID objID) error {
269 2 : db := t.getDB(dbID)
270 2 : if !t.testOpts.strictFS {
271 2 : return nil
272 2 : }
273 2 : t.opts.Cache.Ref()
274 2 : // The fs isn't necessarily a MemFS.
275 2 : fs, ok := vfs.Root(t.opts.FS).(*vfs.MemFS)
276 2 : if ok {
277 0 : fs.SetIgnoreSyncs(true)
278 0 : }
279 2 : if err := db.Close(); err != nil {
280 0 : return err
281 0 : }
282 : // Release any resources held by custom options. This may be used, for
283 : // example, by the encryption-at-rest custom option (within the Cockroach
284 : // repository) to close the file registry.
285 2 : for i := range t.testOpts.CustomOpts {
286 0 : if err := t.testOpts.CustomOpts[i].Close(t.opts); err != nil {
287 0 : return err
288 0 : }
289 : }
290 2 : if ok {
291 0 : fs.ResetToSyncedState()
292 0 : fs.SetIgnoreSyncs(false)
293 0 : }
294 :
295 : // TODO(jackson): Audit errorRate and ensure custom options' hooks semantics
296 : // are well defined within the context of retries.
297 2 : err := t.withRetries(func() (err error) {
298 2 : // Reacquire any resources required by custom options. This may be used, for
299 2 : // example, by the encryption-at-rest custom option (within the Cockroach
300 2 : // repository) to reopen the file registry.
301 2 : for i := range t.testOpts.CustomOpts {
302 0 : if err := t.testOpts.CustomOpts[i].Open(t.opts); err != nil {
303 0 : return err
304 0 : }
305 : }
306 2 : dir := t.dir
307 2 : if len(t.dbs) > 1 {
308 1 : dir = path.Join(dir, fmt.Sprintf("db%d", dbID.slot()))
309 1 : }
310 2 : t.dbs[dbID.slot()-1], err = pebble.Open(dir, t.opts)
311 2 : if err != nil {
312 0 : return err
313 0 : }
314 2 : return err
315 : })
316 2 : t.opts.Cache.Unref()
317 2 : return err
318 : }
319 :
320 1 : func (t *Test) saveInMemoryDataInternal() error {
321 1 : if rootFS := vfs.Root(t.opts.FS); rootFS != vfs.Default {
322 1 : // t.opts.FS is an in-memory system; copy it to disk.
323 1 : if err := os.RemoveAll(t.dir); err != nil {
324 0 : return err
325 0 : }
326 1 : if _, err := vfs.Clone(rootFS, vfs.Default, t.dir, t.dir); err != nil {
327 0 : return err
328 0 : }
329 : }
330 1 : if t.testOpts.sharedStorageEnabled {
331 0 : if err := copyRemoteStorage(t.testOpts.sharedStorageFS, filepath.Join(t.dir, "shared")); err != nil {
332 0 : return err
333 0 : }
334 : }
335 1 : if t.testOpts.externalStorageEnabled {
336 0 : if err := copyRemoteStorage(t.testOpts.externalStorageFS, filepath.Join(t.dir, "external")); err != nil {
337 0 : return err
338 0 : }
339 : }
340 1 : return nil
341 : }
342 :
343 0 : func copyRemoteStorage(fs remote.Storage, outputDir string) error {
344 0 : if err := vfs.Default.MkdirAll(outputDir, 0755); err != nil {
345 0 : return err
346 0 : }
347 0 : objs, err := fs.List("", "")
348 0 : if err != nil {
349 0 : return err
350 0 : }
351 0 : for i := range objs {
352 0 : reader, readSize, err := fs.ReadObject(context.TODO(), objs[i])
353 0 : if err != nil {
354 0 : return err
355 0 : }
356 0 : buf := make([]byte, readSize)
357 0 : if err := reader.ReadAt(context.TODO(), buf, 0); err != nil {
358 0 : return err
359 0 : }
360 0 : outputPath := vfs.Default.PathJoin(outputDir, objs[i])
361 0 : outputFile, err := vfs.Default.Create(outputPath, vfs.WriteCategoryUnspecified)
362 0 : if err != nil {
363 0 : return err
364 0 : }
365 0 : if _, err := outputFile.Write(buf); err != nil {
366 0 : outputFile.Close()
367 0 : return err
368 0 : }
369 0 : if err := outputFile.Close(); err != nil {
370 0 : return err
371 0 : }
372 : }
373 0 : return nil
374 : }
375 :
376 : // If an in-memory FS is being used, save the contents to disk.
377 1 : func (t *Test) saveInMemoryData() {
378 1 : if err := t.saveInMemoryDataInternal(); err != nil {
379 0 : t.opts.Logger.Infof("unable to save data: %s: %v", t.dir, err)
380 0 : }
381 : }
382 :
383 : // Step runs a single operation, returning: whether there are additional
384 : // operations remaining; the operation's output; and an error if any occurred
385 : // while running the operation.
386 : //
387 : // Step may be used instead of Execute to advance a test one operation at a
388 : // time.
389 1 : func (t *Test) Step() (more bool, operationOutput string, err error) {
390 1 : more = t.step(t.h, func(format string, args ...interface{}) {
391 1 : operationOutput = fmt.Sprintf(format, args...)
392 1 : })
393 1 : err = t.h.Error()
394 1 : return more, operationOutput, err
395 : }
396 :
397 2 : func (t *Test) step(h *history, optionalRecordf func(string, ...interface{})) bool {
398 2 : if t.idx >= len(t.ops) {
399 2 : return false
400 2 : }
401 2 : t.runOp(t.idx, h.recorder(-1 /* thread */, t.idx, optionalRecordf))
402 2 : t.idx++
403 2 : return true
404 : }
405 :
406 : // runOp runs t.ops[idx] with t.opTimeout.
407 2 : func (t *Test) runOp(idx int, h historyRecorder) {
408 2 : op := t.ops[idx]
409 2 : var timer *time.Timer
410 2 : if t.opTimeout > 0 {
411 1 : opTimeout := t.opTimeout
412 1 : switch op.(type) {
413 1 : case *compactOp, *downloadOp, *newSnapshotOp, *ingestOp, *ingestAndExciseOp, *ingestExternalFilesOp:
414 1 : // These ops can be very slow, especially if we end up with many tiny
415 1 : 			// tables. Bump up the timeout by a factor.
416 1 : opTimeout *= 4
417 : }
418 1 : timer = time.AfterFunc(opTimeout, func() {
419 0 : panic(fmt.Sprintf("operation took longer than %s: %s", opTimeout, op.String()))
420 : })
421 : }
422 2 : op.run(t, h)
423 2 : if timer != nil {
424 1 : timer.Stop()
425 1 : }
426 : }
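// The timeout guard above, in isolation (generic sketch; doOperation is a
// stand-in for op.run(t, h)): time.AfterFunc schedules a panic that only fires
// if the guarded call has not returned and stopped the timer first.
//
//	timer := time.AfterFunc(opTimeout, func() {
//		panic(fmt.Sprintf("operation took longer than %s", opTimeout))
//	})
//	doOperation()
//	timer.Stop()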
427 :
428 2 : func (t *Test) setBatch(id objID, b *pebble.Batch) {
429 2 : if id.tag() != batchTag {
430 0 : panic(fmt.Sprintf("invalid batch ID: %s", id))
431 : }
432 2 : t.batches[id.slot()] = b
433 : }
434 :
435 2 : func (t *Test) setIter(id objID, i *pebble.Iterator) {
436 2 : if id.tag() != iterTag {
437 0 : panic(fmt.Sprintf("invalid iter ID: %s", id))
438 : }
439 2 : t.iters[id.slot()] = &retryableIter{
440 2 : iter: i,
441 2 : lastKey: nil,
442 2 : needRetry: t.testOpts.RetryPolicy,
443 2 : }
444 : }
445 :
446 : type readerCloser interface {
447 : pebble.Reader
448 : io.Closer
449 : }
450 :
451 2 : func (t *Test) setSnapshot(id objID, s readerCloser) {
452 2 : if id.tag() != snapTag {
453 0 : panic(fmt.Sprintf("invalid snapshot ID: %s", id))
454 : }
455 2 : t.snapshots[id.slot()] = s
456 : }
457 :
458 2 : func (t *Test) setExternalObj(id objID, meta externalObjMeta) {
459 2 : if id.tag() != externalObjTag {
460 0 : panic(fmt.Sprintf("invalid external object ID: %s", id))
461 : }
462 2 : t.externalObjs[id.slot()] = meta
463 : }
464 :
465 2 : func (t *Test) getExternalObj(id objID) externalObjMeta {
466 2 : if id.tag() != externalObjTag || t.externalObjs[id.slot()].sstMeta == nil {
467 0 : panic(fmt.Sprintf("metamorphic test internal error: invalid external object ID: %s", id))
468 : }
469 2 : return t.externalObjs[id.slot()]
470 : }
471 :
472 2 : func (t *Test) clearObj(id objID) {
473 2 : switch id.tag() {
474 2 : case dbTag:
475 2 : t.dbs[id.slot()-1] = nil
476 2 : case batchTag:
477 2 : t.batches[id.slot()] = nil
478 2 : case iterTag:
479 2 : t.iters[id.slot()] = nil
480 2 : case snapTag:
481 2 : t.snapshots[id.slot()] = nil
482 0 : default:
483 0 : panic(fmt.Sprintf("cannot clear ID: %s", id))
484 : }
485 : }
486 :
487 2 : func (t *Test) getBatch(id objID) *pebble.Batch {
488 2 : if id.tag() != batchTag || t.batches[id.slot()] == nil {
489 0 : panic(fmt.Sprintf("metamorphic test internal error: invalid batch ID: %s", id))
490 : }
491 2 : return t.batches[id.slot()]
492 : }
493 :
494 2 : func (t *Test) getCloser(id objID) io.Closer {
495 2 : switch id.tag() {
496 2 : case dbTag:
497 2 : return t.dbs[id.slot()-1]
498 2 : case batchTag:
499 2 : return t.batches[id.slot()]
500 2 : case iterTag:
501 2 : return t.iters[id.slot()]
502 2 : case snapTag:
503 2 : return t.snapshots[id.slot()]
504 0 : default:
505 0 : panic(fmt.Sprintf("cannot close ID: %s", id))
506 : }
507 : }
508 :
509 2 : func (t *Test) getIter(id objID) *retryableIter {
510 2 : if id.tag() != iterTag {
511 0 : panic(fmt.Sprintf("invalid iter ID: %s", id))
512 : }
513 2 : return t.iters[id.slot()]
514 : }
515 :
516 2 : func (t *Test) getReader(id objID) pebble.Reader {
517 2 : switch id.tag() {
518 2 : case dbTag:
519 2 : return t.dbs[id.slot()-1]
520 2 : case batchTag:
521 2 : return t.batches[id.slot()]
522 2 : case snapTag:
523 2 : return t.snapshots[id.slot()]
524 0 : default:
525 0 : panic(fmt.Sprintf("invalid reader ID: %s", id))
526 : }
527 : }
528 :
529 2 : func (t *Test) getWriter(id objID) pebble.Writer {
530 2 : switch id.tag() {
531 2 : case dbTag:
532 2 : return t.dbs[id.slot()-1]
533 2 : case batchTag:
534 2 : return t.batches[id.slot()]
535 0 : default:
536 0 : panic(fmt.Sprintf("invalid writer ID: %s", id))
537 : }
538 : }
539 :
540 2 : func (t *Test) getDB(id objID) *pebble.DB {
541 2 : switch id.tag() {
542 2 : case dbTag:
543 2 : return t.dbs[id.slot()-1]
544 0 : default:
545 0 : panic(fmt.Sprintf("invalid writer tag: %v", id.tag()))
546 : }
547 : }
548 :
549 : // Compute the synchronization points between operations. When operating
550 : // with more than 1 thread, operations must synchronize access to shared
551 : // objects. Compute two slices the same length as ops.
552 : //
553 : // opsWaitOn: the value v at index i indicates that operation i must wait
554 : // for the operation at index v to finish before it may run. NB: v < i
555 : //
556 : // opsDone: the channel at index i must be closed when the operation at index i
557 : // completes. This slice is sparse. Operations that are never used as
558 : // synchronization points may have a nil channel.
559 2 : func computeSynchronizationPoints(ops []op) (opsWaitOn [][]int, opsDone []chan struct{}) {
560 2 : opsDone = make([]chan struct{}, len(ops)) // operation index -> done channel
561 2 : opsWaitOn = make([][]int, len(ops)) // operation index -> operation index
562 2 : lastOpReference := make(map[objID]int) // objID -> operation index
563 2 : for i, o := range ops {
564 2 : // Find the last operation that involved the same receiver object. We at
565 2 : // least need to wait on that operation.
566 2 : receiver := o.receiver()
567 2 : waitIndex, ok := lastOpReference[receiver]
568 2 : lastOpReference[receiver] = i
569 2 : if !ok {
570 2 : // Only valid for i=0. For all other operations, the receiver should
571 2 : // have been referenced by some other operation before it's used as
572 2 : // a receiver.
573 2 : if i != 0 && receiver.tag() != dbTag {
574 0 : panic(fmt.Sprintf("op %s on receiver %s; first reference of %s", ops[i].String(), receiver, receiver))
575 : }
576 : // The initOp is a little special. We do want to store the objects it's
577 : // syncing on, in `lastOpReference`.
578 2 : if i != 0 {
579 0 : continue
580 : }
581 : }
582 :
583 : // The last operation that referenced `receiver` is the one at index
584 : // `waitIndex`. All operations with the same receiver are performed on
585 : // the same thread. We only need to synchronize on the operation at
586 : 		// `waitIndex` if `receiver` isn't also the receiver of that
587 : 		// operation.
588 2 : if ops[waitIndex].receiver() != receiver {
589 2 : opsWaitOn[i] = append(opsWaitOn[i], waitIndex)
590 2 : }
591 :
592 : 		// In addition to synchronizing on the operation's receiver operation,
593 : 		// we may need to synchronize on additional objects. For example,
594 : 		// batch0.Commit() must synchronize on its receiver, batch0, but also
595 : 		// on the DB since it mutates database state.
596 2 : for _, syncObjID := range o.syncObjs() {
597 2 : if vi, vok := lastOpReference[syncObjID]; vok {
598 2 : if vi == i {
599 0 : panic(fmt.Sprintf("%s has %s as syncObj multiple times", ops[i].String(), syncObjID))
600 : }
601 2 : opsWaitOn[i] = append(opsWaitOn[i], vi)
602 : }
603 2 : lastOpReference[syncObjID] = i
604 : }
605 :
606 2 : waitIndexes := opsWaitOn[i]
607 2 : sort.Ints(waitIndexes)
608 2 : for _, waitIndex := range waitIndexes {
609 2 : // If this is the first operation that must wait on the operation at
610 2 : // `waitIndex`, then there will be no channel for the operation yet.
611 2 : // Create one.
612 2 : if opsDone[waitIndex] == nil {
613 2 : opsDone[waitIndex] = make(chan struct{})
614 2 : }
615 : }
616 : }
617 2 : return opsWaitOn, opsDone
618 : }
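// Illustrative sketch of how opsWaitOn and opsDone are intended to be consumed
// (runWithSync is an assumed per-thread runner, not this package's actual
// execution loop): block on every prerequisite's done channel, run the op, and
// then signal completion if any later op waits on this one.
//
//	func runWithSync(t *Test, idx int, h historyRecorder) {
//		for _, waitIdx := range t.opsWaitOn[idx] {
//			if ch := t.opsDone[waitIdx]; ch != nil {
//				<-ch
//			}
//		}
//		t.runOp(idx, h)
//		if ch := t.opsDone[idx]; ch != nil {
//			close(ch)
//		}
//	}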
|