Line data Source code
1 : // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package base
6 :
7 : import (
8 : "context"
9 : "fmt"
10 : "time"
11 :
12 : "github.com/cockroachdb/pebble/internal/humanize"
13 : "github.com/cockroachdb/pebble/internal/treeprinter"
14 : "github.com/cockroachdb/redact"
15 : )
16 :
17 : // InternalIterator iterates over a DB's key/value pairs in key order. Unlike
18 : // the Iterator interface, the returned keys are InternalKeys composed of the
19 : // user-key, a sequence number and a key kind. In forward iteration, key/value
20 : // pairs for identical user-keys are returned in descending sequence order. In
21 : // reverse iteration, key/value pairs for identical user-keys are returned in
22 : // ascending sequence order.
23 : //
24 : // InternalIterators provide 5 absolute positioning methods and 2 relative
25 : // positioning methods. The absolute positioning methods are:
26 : //
27 : // - SeekGE
28 : // - SeekPrefixGE
29 : // - SeekLT
30 : // - First
31 : // - Last
32 : //
33 : // The relative positioning methods are:
34 : //
35 : // - Next
36 : // - Prev
37 : //
38 : // The relative positioning methods can be used in conjunction with any of the
39 : // absolute positioning methods with one exception: SeekPrefixGE does not
40 : // support reverse iteration via Prev. It is undefined to call relative
41 : // positioning methods without ever calling an absolute positioning method.
42 : //
43 : // InternalIterators can optionally implement a prefix iteration mode. This
44 : // mode is entered by calling SeekPrefixGE and exited by any other absolute
45 : // positioning method (SeekGE, SeekLT, First, Last). When in prefix iteration
46 : // mode, a call to Next will advance to the next key which has the same
47 : // "prefix" as the one supplied to SeekPrefixGE. Note that "prefix" in this
48 : // context is not a strict byte prefix, but defined by byte equality for the
49 : // result of the Comparer.Split method. An InternalIterator is not required to
50 : // support prefix iteration mode, and can implement SeekPrefixGE by forwarding
51 : // to SeekGE. Once the iteration prefix is exhausted, it is not valid to call
52 : // Next on an internal iterator that has already returned (nil, nilv) or a key
53 : // beyond the prefix.
54 : //
55 : // Bounds, [lower, upper), can be set on iterators, either using the SetBounds()
56 : // function in the interface, or in implementation specific ways during iterator
57 : // creation. The forward positioning routines (SeekGE, First, and Next) only
58 : // check the upper bound. The reverse positioning routines (SeekLT, Last, and
59 : // Prev) only check the lower bound. It is up to the caller to ensure that the
60 : // forward positioning routines respect the lower bound and the reverse
61 : // positioning routines respect the upper bound (i.e. calling SeekGE instead of
62 : // First if there is a lower bound, and SeekLT instead of Last if there is an
63 : // upper bound). This constraint is imposed in order to elevate that enforcement to
64 : // the caller (generally pebble.Iterator or pebble.mergingIter) rather than
65 : // having it duplicated in every InternalIterator implementation.
66 : //
67 : // Additionally, the caller needs to ensure that SeekGE/SeekPrefixGE are not
68 : // called with a key > the upper bound, and SeekLT is not called with a key <
69 : // the lower bound. InternalIterator implementations are required to respect
70 : // the iterator bounds, never returning records outside of the bounds with one
71 : // exception: an iterator may generate synthetic RANGEDEL marker records. See
72 : // levelIter.syntheticBoundary for the sole existing example of this behavior.
73 : // Specifically, levelIter can return synthetic keys whose user key is equal to
74 : // the lower/upper bound.
75 : //
76 : // The bounds provided to an internal iterator must remain valid until a
77 : // subsequent call to SetBounds has returned. This requirement exists so that
78 : // iterator implementations may compare old and new bounds to apply low-level
79 : // optimizations. The pebble.Iterator satisfies this requirement by maintaining
80 : // two bound buffers and switching between them.
81 : //
82 : // An iterator must be closed after use, but it is not necessary to read an
83 : // iterator until exhaustion.
84 : //
85 : // An iterator is not goroutine-safe, but it is safe to use multiple iterators
86 : // concurrently, either in separate goroutines or switching between the
87 : // iterators in a single goroutine.
88 : //
89 : // It is also safe to use an iterator concurrently with modifying its
90 : // underlying DB, if that DB permits modification. However, the resultant
91 : // key/value pairs are not guaranteed to be a consistent snapshot of that DB
92 : // at a particular point in time.
93 : //
94 : // InternalIterators accumulate errors encountered during operation, exposing
95 : // them through the Error method. All of the absolute positioning methods
96 : // reset any accumulated error before positioning. Relative positioning
97 : // methods return without advancing if the iterator has accumulated an error.
98 : //
99 : // nilv == shorthand for LazyValue{}, which represents a nil value.
100 : type InternalIterator interface {
101 : // SeekGE moves the iterator to the first key/value pair whose key is greater
102 : // than or equal to the given key. Returns the key and value if the iterator
103 : // is pointing at a valid entry, and (nil, nilv) otherwise. Note that SeekGE
104 : // only checks the upper bound. It is up to the caller to ensure that key
105 : // is greater than or equal to the lower bound.
106 : SeekGE(key []byte, flags SeekGEFlags) *InternalKV
107 :
108 : // SeekPrefixGE moves the iterator to the first key/value pair whose key is
109 : // greater than or equal to the given key. Returns the key and value if the
110 : // iterator is pointing at a valid entry, and (nil, nilv) otherwise. Note that
111 : // SeekPrefixGE only checks the upper bound. It is up to the caller to ensure
112 : // that key is greater than or equal to the lower bound.
113 : //
114 : // The prefix argument is used by some InternalIterator implementations
115 : // (e.g. sstable.Reader) to avoid expensive operations. This operation is
116 : // only useful when a user-defined Split function is supplied to the
117 : // Comparer for the DB. The supplied prefix will be the prefix of the given
118 : // key returned by that Split function. If the iterator is able to determine
119 : // that no key with the prefix exists, it can return (nil,nilv). Unlike
120 : // SeekGE, this is not an indication that iteration is exhausted. The prefix
121 : // byte slice is guaranteed to be stable until the next absolute positioning
122 : // operation.
123 : //
124 : // Note that the iterator may return keys not matching the prefix. It is up
125 : // to the caller to check if the prefix matches.
126 : //
127 : // Calling SeekPrefixGE places the receiver into prefix iteration mode. Once
128 : // in this mode, reverse iteration may not be supported and will return an
129 : // error. Note that pebble/Iterator.SeekPrefixGE has this same restriction on
130 : // not supporting reverse iteration in prefix iteration mode until a
131 : // different positioning routine (SeekGE, SeekLT, First or Last) switches the
132 : // iterator out of prefix iteration.
133 : SeekPrefixGE(prefix, key []byte, flags SeekGEFlags) *InternalKV
134 :
135 : // SeekLT moves the iterator to the last key/value pair whose key is less
136 : // than the given key. Returns the key and value if the iterator is pointing
137 : // at a valid entry, and (nil, nilv) otherwise. Note that SeekLT only checks
138 : // the lower bound. It is up to the caller to ensure that key is less than
139 : // the upper bound.
140 : SeekLT(key []byte, flags SeekLTFlags) *InternalKV
141 :
142 : // First moves the iterator to the first key/value pair. Returns the key and
143 : // value if the iterator is pointing at a valid entry, and (nil, nilv)
144 : // otherwise. Note that First only checks the upper bound. It is up to the
145 : // caller to ensure that First() is not called when there is a lower bound,
146 : // and instead call SeekGE(lower).
147 : First() *InternalKV
148 :
149 : // Last moves the iterator to the last key/value pair. Returns the key and
150 : // value if the iterator is pointing at a valid entry, and (nil, nilv)
151 : // otherwise. Note that Last only checks the lower bound. It is up to the
152 : // caller to ensure that Last() is not called when there is an upper bound,
153 : // and instead call SeekLT(upper).
154 : Last() *InternalKV
155 :
156 : // Next moves the iterator to the next key/value pair. Returns the key and
157 : // value if the iterator is pointing at a valid entry, and (nil, nilv)
158 : // otherwise. Note that Next only checks the upper bound. It is up to the
159 : // caller to ensure that key is greater than or equal to the lower bound.
160 : //
161 : // It is valid to call Next when the iterator is positioned before the first
162 : // key/value pair due to either a prior call to SeekLT or Prev which returned
163 : // (nil, nilv). It is not allowed to call Next when the previous call to SeekGE,
164 : // SeekPrefixGE or Next returned (nil, nilv).
165 : Next() *InternalKV
166 :
167 : // NextPrefix moves the iterator to the next key/value pair with a different
168 : // prefix than the key at the current iterator position. Returns the key and
169 : // value if the iterator is pointing at a valid entry, and (nil, nilv)
170 : // otherwise. Note that NextPrefix only checks the upper bound. It is up to
171 : // the caller to ensure that key is greater than or equal to the lower
172 : // bound.
173 : //
174 : // NextPrefix is passed the immediate successor to the current prefix key. A
175 : // valid implementation of NextPrefix is to call SeekGE with succKey.
176 : //
177 : // It is not allowed to call NextPrefix when the previous call was a reverse
178 : // positioning operation or a call to a forward positioning method that
179 : // returned (nil, nilv). It is also not allowed to call NextPrefix when the
180 : // iterator is in prefix iteration mode.
181 : NextPrefix(succKey []byte) *InternalKV
182 :
183 : // Prev moves the iterator to the previous key/value pair. Returns the key
184 : // and value if the iterator is pointing at a valid entry, and (nil, nilv)
185 : // otherwise. Note that Prev only checks the lower bound. It is up to the
186 : // caller to ensure that key is less than the upper bound.
187 : //
188 : // It is valid to call Prev when the iterator is positioned after the last
189 : // key/value pair due to either a prior call to SeekGE or Next which returned
190 : // (nil, nilv). It is not allowed to call Prev when the previous call to SeekLT
191 : // or Prev returned (nil, nilv).
192 : Prev() *InternalKV
193 :
194 : // Error returns any accumulated error. It may not include errors returned
195 : // to the client when calling LazyValue.Value().
196 : Error() error
197 :
198 : // Close closes the iterator and returns any accumulated error. Exhausting
199 : // all the key/value pairs in a table is not considered to be an error.
200 : //
201 : // Once Close is called, the iterator should not be used again. Specific
202 : // implementations may support multiple calls to Close (but no other calls
203 : // after the first Close).
204 : Close() error
205 :
206 : // SetBounds sets the lower and upper bounds for the iterator. Note that the
207 : // result of Next and Prev will be undefined until the iterator has been
208 : // repositioned with SeekGE, SeekPrefixGE, SeekLT, First, or Last.
209 : //
210 : // The bounds provided must remain valid until a subsequent call to
211 : // SetBounds has returned. This requirement exists so that iterator
212 : // implementations may compare old and new bounds to apply low-level
213 : // optimizations.
214 : SetBounds(lower, upper []byte)
215 :
216 : // SetContext replaces the context provided at iterator creation, or the
217 : // last one provided by SetContext.
218 : SetContext(ctx context.Context)
219 :
220 : fmt.Stringer
221 :
222 : IteratorDebug
223 : }
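
// The function below is an illustrative sketch (not part of the package) of
// how a caller might drive an InternalIterator forward while honoring the
// bounds contract described above: when a lower bound exists, the caller seeks
// to it instead of calling First, and the iterator itself enforces the upper
// bound. It assumes, per the (nil, nilv) convention, that a nil *InternalKV
// means the iterator is exhausted or out of bounds. The visit callback is
// hypothetical.
func exampleForwardScan(it InternalIterator, lower []byte, visit func(kv *InternalKV)) error {
	// Position at the start of the range. First must not be used when a lower
	// bound is set, so seek to the lower bound instead.
	var kv *InternalKV
	if lower != nil {
		kv = it.SeekGE(lower, SeekGEFlagsNone)
	} else {
		kv = it.First()
	}
	// Forward positioning methods check the upper bound internally, so the
	// loop simply advances until the iterator returns nil.
	for ; kv != nil; kv = it.Next() {
		visit(kv)
	}
	// Surface any error accumulated during iteration.
	return it.Error()
}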
224 :
225 : // TopLevelIterator extends InternalIterator to include an additional absolute
226 : // positioning method, SeekPrefixGEStrict.
227 : type TopLevelIterator interface {
228 : InternalIterator
229 :
230 : // SeekPrefixGEStrict extends InternalIterator.SeekPrefixGE with a guarantee
231 : // that the iterator only returns keys matching the prefix.
232 : SeekPrefixGEStrict(prefix, key []byte, flags SeekGEFlags) *InternalKV
233 : }
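
// As the NextPrefix documentation above notes, a valid (if unoptimized)
// implementation of NextPrefix is to forward to SeekGE with succKey. The
// wrapper below is an illustrative sketch of that fallback; it is not a type
// used by the package.
type seekGENextPrefixIter struct {
	InternalIterator
}

// NextPrefix implements the fallback by seeking to succKey, the immediate
// successor of the current prefix, which lands on the first key with a
// different prefix (if any remains within the upper bound).
func (i seekGENextPrefixIter) NextPrefix(succKey []byte) *InternalKV {
	return i.SeekGE(succKey, SeekGEFlagsNone)
}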
234 :
235 : // SeekGEFlags holds flags that may configure the behavior of a forward seek.
236 : // Not all flags are relevant to all iterators.
237 : type SeekGEFlags uint8
238 :
239 : const (
240 : seekGEFlagTrySeekUsingNext uint8 = iota
241 : seekGEFlagRelativeSeek
242 : seekGEFlagBatchJustRefreshed
243 : )
244 :
245 : // SeekGEFlagsNone is the default value of SeekGEFlags, with all flags disabled.
246 : const SeekGEFlagsNone = SeekGEFlags(0)
247 :
248 : // TrySeekUsingNext indicates whether a performance optimization was enabled
249 : // by a caller, asserting that the caller has not done any action to move this
250 : // iterator beyond the first key that would be found if this iterator were to
251 : // honestly do the intended seek. For example, say the caller did a
252 : // SeekGE(k1...), followed by SeekGE(k2...) where k1 <= k2, without any
253 : // intermediate positioning calls. The caller can safely specify true for this
254 : // parameter in the second call. As another example, say the caller made one
255 : // call to Next between the two Seek calls, and k1 < k2. Again, the caller can
256 : // safely specify a true value for this parameter. Note that a false value is
257 : // always safe. The callee is free to ignore the true value if its
258 : // implementation does not permit this optimization.
259 : //
260 : // We make the caller do this determination since a string comparison of k1, k2
261 : // is not necessarily cheap, and there may be many iterators in the iterator
262 : // stack. Doing it once at the root of the iterator stack is cheaper.
263 : //
264 : // This optimization could also be applied to SeekLT (where it would be
265 : // trySeekUsingPrev). We currently only do it for SeekPrefixGE and SeekGE
266 : // because this is where this optimization helps the performance of CockroachDB.
267 : // The SeekLT cases in CockroachDB are typically accompanied by bounds that
268 : // change between seek calls, and are optimized inside certain iterator
269 : // implementations, like singleLevelIterator, without any extra parameter
270 : // passing (though the same amortization of string comparisons could be done to
271 : // improve that optimization, by making the root of the iterator stack do it).
272 1 : func (s SeekGEFlags) TrySeekUsingNext() bool { return (s & (1 << seekGEFlagTrySeekUsingNext)) != 0 }
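
// The sketch below illustrates the caller-side pattern described above: after
// an initial seek, a caller performing monotonically increasing seeks with no
// intervening repositioning may request the try-seek-using-next optimization,
// which the iterator is free to ignore. The function and its keys argument are
// hypothetical and for illustration only; keys is assumed to be sorted in
// ascending order.
func exampleMonotoneSeeks(it InternalIterator, keys [][]byte) {
	flags := SeekGEFlagsNone
	for _, k := range keys {
		_ = it.SeekGE(k, flags)
		// Every subsequent seek targets a key >= the current one and nothing
		// has moved the iterator in between, so the hint is safe to enable.
		flags = flags.EnableTrySeekUsingNext()
	}
}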
273 :
274 : // RelativeSeek is set when in the course of a forward positioning operation, a
275 : // higher-level iterator seeks a lower-level iterator to a larger key than the
276 : // one at the current iterator position.
277 : //
278 : // Concretely, this occurs when the merging iterator observes a range deletion
279 : // covering the key at a level's current position, and the merging iterator
280 : // seeks the level to the range deletion's end key. During lazy-combined
281 : // iteration, this flag signals to the level iterator that the seek is NOT an
282 : // absolute-positioning operation from the perspective of the pebble.Iterator,
283 : // and the level iterator must look for range keys in tables between the current
284 : // iterator position and the new seeked position.
285 1 : func (s SeekGEFlags) RelativeSeek() bool { return (s & (1 << seekGEFlagRelativeSeek)) != 0 }
286 :
287 : // BatchJustRefreshed is set by Seek[Prefix]GE when an iterator's view of an
288 : // indexed batch was just refreshed. It serves as a signal to the batch iterator
289 : // to ignore the TrySeekUsingNext optimization, because the external knowledge
290 : // imparted by the TrySeekUsingNext flag does not apply to the batch iterator's
291 : // position. See (pebble.Iterator).batchJustRefreshed.
292 1 : func (s SeekGEFlags) BatchJustRefreshed() bool { return (s & (1 << seekGEFlagBatchJustRefreshed)) != 0 }
293 :
294 : // EnableTrySeekUsingNext returns the provided flags with the
295 : // try-seek-using-next optimization enabled. See TrySeekUsingNext for an
296 : // explanation of this optimization.
297 1 : func (s SeekGEFlags) EnableTrySeekUsingNext() SeekGEFlags {
298 1 : return s | (1 << seekGEFlagTrySeekUsingNext)
299 1 : }
300 :
301 : // DisableTrySeekUsingNext returns the provided flags with the
302 : // try-seek-using-next optimization disabled.
303 1 : func (s SeekGEFlags) DisableTrySeekUsingNext() SeekGEFlags {
304 1 : return s &^ (1 << seekGEFlagTrySeekUsingNext)
305 1 : }
306 :
307 : // EnableRelativeSeek returns the provided flags with the relative-seek flag
308 : // enabled. See RelativeSeek for an explanation of this flag's use.
309 1 : func (s SeekGEFlags) EnableRelativeSeek() SeekGEFlags {
310 1 : return s | (1 << seekGEFlagRelativeSeek)
311 1 : }
312 :
313 : // DisableRelativeSeek returns the provided flags with the relative-seek flag
314 : // disabled.
315 1 : func (s SeekGEFlags) DisableRelativeSeek() SeekGEFlags {
316 1 : return s &^ (1 << seekGEFlagRelativeSeek)
317 1 : }
318 :
319 : // EnableBatchJustRefreshed returns the provided flags with the
320 : // batch-just-refreshed bit set. See BatchJustRefreshed for an explanation of
321 : // this flag.
322 1 : func (s SeekGEFlags) EnableBatchJustRefreshed() SeekGEFlags {
323 1 : return s | (1 << seekGEFlagBatchJustRefreshed)
324 1 : }
325 :
326 : // DisableBatchJustRefreshed returns the provided flags with the
327 : // batch-just-refreshed bit unset.
328 1 : func (s SeekGEFlags) DisableBatchJustRefreshed() SeekGEFlags {
329 1 : return s &^ (1 << seekGEFlagBatchJustRefreshed)
330 1 : }
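
// A minimal sketch showing that the SeekGE flag helpers above compose: each
// helper sets or clears a single bit, so individual flags can be enabled and
// disabled independently of one another. The function is illustrative only.
func exampleSeekGEFlagComposition() SeekGEFlags {
	flags := SeekGEFlagsNone.EnableTrySeekUsingNext().EnableRelativeSeek()
	// Clearing one flag leaves the other untouched: after this call,
	// flags.RelativeSeek() is still true and flags.TrySeekUsingNext() is false.
	flags = flags.DisableTrySeekUsingNext()
	return flags
}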
331 :
332 : // SeekLTFlags holds flags that may configure the behavior of a reverse seek.
333 : // Not all flags are relevant to all iterators.
334 : type SeekLTFlags uint8
335 :
336 : const (
337 : seekLTFlagRelativeSeek uint8 = iota
338 : )
339 :
340 : // SeekLTFlagsNone is the default value of SeekLTFlags, with all flags disabled.
341 : const SeekLTFlagsNone = SeekLTFlags(0)
342 :
343 : // RelativeSeek is set when in the course of a reverse positioning operation, a
344 : // higher-level iterator seeks a lower-level iterator to a smaller key than the
345 : // one at the current iterator position.
346 : //
347 : // Concretely, this occurs when the merging iterator observes a range deletion
348 : // covering the key at a level's current position, and the merging iterator
349 : // seeks the level to the range deletion's start key. During lazy-combined
350 : // iteration, this flag signals to the level iterator that the seek is NOT an
351 : // absolute-positioning operation from the perspective of the pebble.Iterator,
352 : // and the level iterator must look for range keys in tables between the current
353 : // iterator position and the new seeked position.
354 1 : func (s SeekLTFlags) RelativeSeek() bool { return s&(1<<seekLTFlagRelativeSeek) != 0 }
355 :
356 : // EnableRelativeSeek returns the provided flags with the relative-seek flag
357 : // enabled. See RelativeSeek for an explanation of this flag's use.
358 1 : func (s SeekLTFlags) EnableRelativeSeek() SeekLTFlags {
359 1 : return s | (1 << seekLTFlagRelativeSeek)
360 1 : }
361 :
362 : // DisableRelativeSeek returns the provided flags with the relative-seek flag
363 : // disabled.
364 1 : func (s SeekLTFlags) DisableRelativeSeek() SeekLTFlags {
365 1 : return s &^ (1 << seekLTFlagRelativeSeek)
366 1 : }
367 :
368 : // InternalIteratorStats contains miscellaneous stats produced by
369 : // InternalIterators that are part of the InternalIterator tree. Not every
370 : // field is relevant for an InternalIterator implementation. The field values
371 : // are aggregated as one goes up the InternalIterator tree.
372 : type InternalIteratorStats struct {
373 : // Bytes in the loaded blocks. If the block was compressed, this is the
374 : // compressed bytes. Currently, only the index blocks, data blocks
375 : // containing points, and filter blocks are included.
376 : BlockBytes uint64
377 : // Subset of BlockBytes that were in the block cache.
378 : BlockBytesInCache uint64
379 : // BlockReadDuration accumulates the duration spent fetching blocks
380 : // due to block cache misses.
381 : // TODO(sumeer): this currently excludes the time spent in Reader creation,
382 : // and in reading the rangedel and rangekey blocks. Fix that.
383 : BlockReadDuration time.Duration
384 : // The following can repeatedly count the same points if they are iterated
385 : // over multiple times. Additionally, they may count a point twice when
386 : // switching directions. The latter could be improved if needed.
387 :
388 : // Bytes in keys that were iterated over. Currently, only point keys are
389 : // included.
390 : KeyBytes uint64
391 : // Bytes in values that were iterated over. Currently, only point values are
392 : // included. For separated values, this is the size of the handle.
393 : ValueBytes uint64
394 : // The count of points iterated over.
395 : PointCount uint64
396 : // Points that were iterated over that were covered by range tombstones. It
397 : // can be useful for discovering instances of
398 : // https://github.com/cockroachdb/pebble/issues/1070.
399 : PointsCoveredByRangeTombstones uint64
400 :
401 : // Stats related to points in value blocks encountered during iteration.
402 : // These are useful to understand outliers, since typical user facing
403 : // iteration should tend to only look at the latest point, and hence have
404 : // the following stats close to 0.
405 : SeparatedPointValue struct {
406 : // Count is a count of points that were in value blocks. This is not a
407 : // subset of PointCount: PointCount is produced by mergingIter, so a single
408 : // positioning call that successfully returns a point yields a
409 : // PointCount of 1, regardless of how many sstables (and memtables etc.)
410 : // in the heap got positioned. The count here includes every sstable
411 : // iterator that got positioned in the heap.
412 : Count uint64
413 : // ValueBytes represents the total byte length of the values (in value
414 : // blocks) of the points corresponding to Count.
415 : ValueBytes uint64
416 : // ValueBytesFetched is the total byte length of the values (in value
417 : // blocks) that were retrieved.
418 : ValueBytesFetched uint64
419 : }
420 : }
421 :
422 : // Merge merges the stats in from into the given stats.
423 1 : func (s *InternalIteratorStats) Merge(from InternalIteratorStats) {
424 1 : s.BlockBytes += from.BlockBytes
425 1 : s.BlockBytesInCache += from.BlockBytesInCache
426 1 : s.BlockReadDuration += from.BlockReadDuration
427 1 : s.KeyBytes += from.KeyBytes
428 1 : s.ValueBytes += from.ValueBytes
429 1 : s.PointCount += from.PointCount
430 1 : s.PointsCoveredByRangeTombstones += from.PointsCoveredByRangeTombstones
431 1 : s.SeparatedPointValue.Count += from.SeparatedPointValue.Count
432 1 : s.SeparatedPointValue.ValueBytes += from.SeparatedPointValue.ValueBytes
433 1 : s.SeparatedPointValue.ValueBytesFetched += from.SeparatedPointValue.ValueBytesFetched
434 1 : }
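
// The sketch below shows how per-child stats might be rolled up an iterator
// tree using Merge, per the aggregation described in the InternalIteratorStats
// documentation. The children slice is hypothetical.
func exampleAggregateStats(children []InternalIteratorStats) InternalIteratorStats {
	var total InternalIteratorStats
	for _, c := range children {
		// Merge adds every field of c into total, including the nested
		// SeparatedPointValue counters.
		total.Merge(c)
	}
	return total
}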
435 :
436 0 : func (s *InternalIteratorStats) String() string {
437 0 : return redact.StringWithoutMarkers(s)
438 0 : }
439 :
440 : // SafeFormat implements the redact.SafeFormatter interface.
441 1 : func (s *InternalIteratorStats) SafeFormat(p redact.SafePrinter, verb rune) {
442 1 : p.Printf("blocks: %s cached",
443 1 : humanize.Bytes.Uint64(s.BlockBytesInCache),
444 1 : )
445 1 : if s.BlockBytes != s.BlockBytesInCache || s.BlockReadDuration != 0 {
446 1 : p.Printf(", %s not cached (read time: %s)",
447 1 : humanize.Bytes.Uint64(s.BlockBytes-s.BlockBytesInCache),
448 1 : humanize.FormattedString(s.BlockReadDuration.String()),
449 1 : )
450 1 : }
451 1 : p.Printf("; points: %s", humanize.Count.Uint64(s.PointCount))
452 1 :
453 1 : if s.PointsCoveredByRangeTombstones != 0 {
454 0 : p.Printf("(%s tombstoned)", humanize.Count.Uint64(s.PointsCoveredByRangeTombstones))
455 0 : }
456 1 : p.Printf(" (%s keys, %s values)",
457 1 : humanize.Bytes.Uint64(s.KeyBytes),
458 1 : humanize.Bytes.Uint64(s.ValueBytes),
459 1 : )
460 1 : if s.SeparatedPointValue.Count != 0 {
461 1 : p.Printf("; separated: %s (%s, %s fetched)",
462 1 : humanize.Count.Uint64(s.SeparatedPointValue.Count),
463 1 : humanize.Bytes.Uint64(s.SeparatedPointValue.ValueBytes),
464 1 : humanize.Bytes.Uint64(s.SeparatedPointValue.ValueBytesFetched))
465 1 : }
466 : }
467 :
468 : // IteratorDebug is an interface implemented by all internal iterators and
469 : // fragment iterators.
470 : type IteratorDebug interface {
471 : // DebugTree prints the entire iterator stack, used for debugging.
472 : //
473 : // Each implementation should perform a single Child/Childf call on tp.
474 : DebugTree(tp treeprinter.Node)
475 : }
476 :
477 : // DebugTree returns the iterator tree as a multi-line string.
478 0 : func DebugTree(iter IteratorDebug) string {
479 0 : tp := treeprinter.New()
480 0 : iter.DebugTree(tp)
481 0 : return tp.String()
482 0 : }
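
// A small illustrative sketch of dumping an iterator stack with DebugTree
// while debugging. Any InternalIterator satisfies IteratorDebug via the
// embedded interface; the iterator argument here is hypothetical.
func exampleDumpIteratorTree(it InternalIterator) {
	// DebugTree has each iterator in the stack add a treeprinter node, then
	// renders the tree as a multi-line string.
	fmt.Println(DebugTree(it))
}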