/src/hermes/external/llvh/lib/Support/FoldingSet.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | //===-- Support/FoldingSet.cpp - Uniquing Hash Set --------------*- C++ -*-===// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | // |
10 | | // This file implements a hash set that can be used to remove duplication of |
11 | | // nodes in a graph. |
12 | | // |
13 | | //===----------------------------------------------------------------------===// |
14 | | |
15 | | #include "llvh/ADT/FoldingSet.h" |
16 | | #include "llvh/ADT/Hashing.h" |
17 | | #include "llvh/Support/Allocator.h" |
18 | | #include "llvh/Support/ErrorHandling.h" |
19 | | #include "llvh/Support/Host.h" |
20 | | #include "llvh/Support/MathExtras.h" |
21 | | #include <cassert> |
22 | | #include <cstring> |
23 | | using namespace llvh; |
24 | | |
25 | | //===----------------------------------------------------------------------===// |
26 | | // FoldingSetNodeIDRef Implementation |
27 | | |
28 | | /// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef, |
29 | | /// used to lookup the node in the FoldingSetBase. |
30 | 4.68M | unsigned FoldingSetNodeIDRef::ComputeHash() const { |
31 | 4.68M | return static_cast<unsigned>(hash_combine_range(Data, Data+Size)); |
32 | 4.68M | } |
33 | | |
34 | 4.84M | bool FoldingSetNodeIDRef::operator==(FoldingSetNodeIDRef RHS) const { |
35 | 4.84M | if (Size != RHS.Size) return false; |
36 | 4.84M | return memcmp(Data, RHS.Data, Size*sizeof(*Data)) == 0; |
37 | 4.84M | } |
38 | | |
39 | | /// Used to compare the "ordering" of two nodes as defined by the |
40 | | /// profiled bits and their ordering defined by memcmp(). |
41 | 0 | bool FoldingSetNodeIDRef::operator<(FoldingSetNodeIDRef RHS) const { |
42 | 0 | if (Size != RHS.Size) |
43 | 0 | return Size < RHS.Size; |
44 | 0 | return memcmp(Data, RHS.Data, Size*sizeof(*Data)) < 0; |
45 | 0 | } |
46 | | |
47 | | //===----------------------------------------------------------------------===// |
48 | | // FoldingSetNodeID Implementation |
49 | | |
50 | | /// Add* - Add various data types to Bit data. |
51 | | /// |
52 | 2.42M | void FoldingSetNodeID::AddPointer(const void *Ptr) { |
53 | | // Note: this adds pointers to the hash using sizes and endianness that |
54 | | // depend on the host. It doesn't matter, however, because hashing on |
55 | | // pointer values is inherently unstable. Nothing should depend on the |
56 | | // ordering of nodes in the folding set. |
57 | 2.42M | static_assert(sizeof(uintptr_t) <= sizeof(unsigned long long), |
58 | 2.42M | "unexpected pointer size"); |
59 | 2.42M | AddInteger(reinterpret_cast<uintptr_t>(Ptr)); |
60 | 2.42M | } |
61 | 0 | void FoldingSetNodeID::AddInteger(signed I) { |
62 | 0 | Bits.push_back(I); |
63 | 0 | } |
64 | 19.0M | void FoldingSetNodeID::AddInteger(unsigned I) { |
65 | 19.0M | Bits.push_back(I); |
66 | 19.0M | } |
67 | 0 | void FoldingSetNodeID::AddInteger(long I) { |
68 | 0 | AddInteger((unsigned long)I); |
69 | 0 | } |
70 | 9.52M | void FoldingSetNodeID::AddInteger(unsigned long I) { |
71 | 9.52M | if (sizeof(long) == sizeof(int)) |
72 | 0 | AddInteger(unsigned(I)); |
73 | 9.52M | else if (sizeof(long) == sizeof(long long)) { |
74 | 9.52M | AddInteger((unsigned long long)I); |
75 | 9.52M | } else { |
76 | 0 | llvm_unreachable("unexpected sizeof(long)"); |
77 | 0 | } |
78 | 9.52M | } |
79 | 0 | void FoldingSetNodeID::AddInteger(long long I) { |
80 | 0 | AddInteger((unsigned long long)I); |
81 | 0 | } |
82 | 9.52M | void FoldingSetNodeID::AddInteger(unsigned long long I) { |
83 | 9.52M | AddInteger(unsigned(I)); |
84 | 9.52M | AddInteger(unsigned(I >> 32)); |
85 | 9.52M | } |
86 | | |
/// AddString - Append the string's length followed by its bytes, packed four
/// per 32-bit word, to the profile. The length prefix goes in even for the
/// empty string.
void FoldingSetNodeID::AddString(StringRef String) {
  unsigned Size = String.size();
  Bits.push_back(Size);
  if (!Size) return;

  unsigned Units = Size / 4;
  unsigned Pos = 0;
  const unsigned *Base = (const unsigned*) String.data();

  // If the string is aligned do a bulk transfer.
  if (!((intptr_t)Base & 3)) {
    Bits.append(Base, Base + Units);
    // Pos deliberately overshoots Size by (4 - leftover bytes); the switch
    // at the bottom relies on that overshoot to count the trailing bytes.
    Pos = (Units + 1) * 4;
  } else {
    // Otherwise do it the hard way.
    // To be compatible with above bulk transfer, we need to take endianness
    // into account.
    static_assert(sys::IsBigEndianHost || sys::IsLittleEndianHost,
                  "Unexpected host endianness");
    if (sys::IsBigEndianHost) {
      // Assemble each word so it matches what the aligned memcpy-style path
      // would have produced on a big-endian host.
      for (Pos += 4; Pos <= Size; Pos += 4) {
        unsigned V = ((unsigned char)String[Pos - 4] << 24) |
                     ((unsigned char)String[Pos - 3] << 16) |
                     ((unsigned char)String[Pos - 2] << 8) |
                     (unsigned char)String[Pos - 1];
        Bits.push_back(V);
      }
    } else { // Little-endian host
      for (Pos += 4; Pos <= Size; Pos += 4) {
        unsigned V = ((unsigned char)String[Pos - 1] << 24) |
                     ((unsigned char)String[Pos - 2] << 16) |
                     ((unsigned char)String[Pos - 3] << 8) |
                     (unsigned char)String[Pos - 4];
        Bits.push_back(V);
      }
    }
    // After either loop Pos has likewise overshot Size by (4 - leftover).
  }

  // With the leftover bits.
  unsigned V = 0;
  // Pos will have overshot size by 4 - #bytes left over.
  // No need to take endianness into account here - this is always executed.
  // The cases intentionally fall through, packing 3, 2, or 1 trailing bytes.
  switch (Pos - Size) {
  case 1: V = (V << 8) | (unsigned char)String[Size - 3]; LLVM_FALLTHROUGH;
  case 2: V = (V << 8) | (unsigned char)String[Size - 2]; LLVM_FALLTHROUGH;
  case 3: V = (V << 8) | (unsigned char)String[Size - 1]; break;
  default: return; // Nothing left.
  }

  Bits.push_back(V);
}
138 | | |
139 | | // AddNodeID - Adds the Bit data of another ID to *this. |
140 | 0 | void FoldingSetNodeID::AddNodeID(const FoldingSetNodeID &ID) { |
141 | 0 | Bits.append(ID.Bits.begin(), ID.Bits.end()); |
142 | 0 | } |
143 | | |
144 | | /// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used to |
145 | | /// lookup the node in the FoldingSetBase. |
146 | 4.68M | unsigned FoldingSetNodeID::ComputeHash() const { |
147 | 4.68M | return FoldingSetNodeIDRef(Bits.data(), Bits.size()).ComputeHash(); |
148 | 4.68M | } |
149 | | |
150 | | /// operator== - Used to compare two nodes to each other. |
151 | | /// |
152 | 4.84M | bool FoldingSetNodeID::operator==(const FoldingSetNodeID &RHS) const { |
153 | 4.84M | return *this == FoldingSetNodeIDRef(RHS.Bits.data(), RHS.Bits.size()); |
154 | 4.84M | } |
155 | | |
156 | | /// operator== - Used to compare two nodes to each other. |
157 | | /// |
158 | 4.84M | bool FoldingSetNodeID::operator==(FoldingSetNodeIDRef RHS) const { |
159 | 4.84M | return FoldingSetNodeIDRef(Bits.data(), Bits.size()) == RHS; |
160 | 4.84M | } |
161 | | |
162 | | /// Used to compare the "ordering" of two nodes as defined by the |
163 | | /// profiled bits and their ordering defined by memcmp(). |
164 | 0 | bool FoldingSetNodeID::operator<(const FoldingSetNodeID &RHS) const { |
165 | 0 | return *this < FoldingSetNodeIDRef(RHS.Bits.data(), RHS.Bits.size()); |
166 | 0 | } |
167 | | |
168 | 0 | bool FoldingSetNodeID::operator<(FoldingSetNodeIDRef RHS) const { |
169 | 0 | return FoldingSetNodeIDRef(Bits.data(), Bits.size()) < RHS; |
170 | 0 | } |
171 | | |
172 | | /// Intern - Copy this node's data to a memory region allocated from the |
173 | | /// given allocator and return a FoldingSetNodeIDRef describing the |
174 | | /// interned data. |
175 | | FoldingSetNodeIDRef |
176 | 0 | FoldingSetNodeID::Intern(BumpPtrAllocator &Allocator) const { |
177 | 0 | unsigned *New = Allocator.Allocate<unsigned>(Bits.size()); |
178 | 0 | std::uninitialized_copy(Bits.begin(), Bits.end(), New); |
179 | 0 | return FoldingSetNodeIDRef(New, Bits.size()); |
180 | 0 | } |
181 | | |
182 | | //===----------------------------------------------------------------------===// |
183 | | /// Helper functions for FoldingSetBase. |
184 | | |
185 | | /// GetNextPtr - In order to save space, each bucket is a |
186 | | /// singly-linked-list. In order to make deletion more efficient, we make |
187 | | /// the list circular, so we can delete a node without computing its hash. |
188 | | /// The problem with this is that the start of the hash buckets are not |
189 | | /// Nodes. If NextInBucketPtr is a bucket pointer, this method returns null: |
190 | | /// use GetBucketPtr when this happens. |
191 | 8.76M | static FoldingSetBase::Node *GetNextPtr(void *NextInBucketPtr) { |
192 | | // The low bit is set if this is the pointer back to the bucket. |
193 | 8.76M | if (reinterpret_cast<intptr_t>(NextInBucketPtr) & 1) |
194 | 1.62M | return nullptr; |
195 | | |
196 | 7.13M | return static_cast<FoldingSetBase::Node*>(NextInBucketPtr); |
197 | 8.76M | } |
198 | | |
199 | | |
/// GetBucketPtr - Recover the bucket address from a tagged bucket pointer
/// (one whose low bit is set); used together with GetNextPtr's isNode
/// testing.
static void **GetBucketPtr(void *NextInBucketPtr) {
  const intptr_t Tagged = reinterpret_cast<intptr_t>(NextInBucketPtr);
  assert((Tagged & 1) && "Not a bucket pointer");
  // Strip the tag bit to get back the real bucket address.
  const intptr_t Untagged = Tagged & ~intptr_t(1);
  return reinterpret_cast<void **>(Untagged);
}
206 | | |
/// GetBucketFor - Hash the specified node ID and return the hash bucket for
/// the specified ID.
static void **GetBucketFor(unsigned Hash, void **Buckets, unsigned NumBuckets) {
  // NumBuckets is always a power of 2, so masking the hash is equivalent to
  // taking it modulo the table size.
  return &Buckets[Hash & (NumBuckets - 1)];
}
214 | | |
215 | | /// AllocateBuckets - Allocated initialized bucket memory. |
216 | 739 | static void **AllocateBuckets(unsigned NumBuckets) { |
217 | 739 | void **Buckets = static_cast<void**>(safe_calloc(NumBuckets + 1, |
218 | 739 | sizeof(void*))); |
219 | | // Set the very last bucket to be a non-null "pointer". |
220 | 739 | Buckets[NumBuckets] = reinterpret_cast<void*>(-1); |
221 | 739 | return Buckets; |
222 | 739 | } |
223 | | |
224 | | //===----------------------------------------------------------------------===// |
225 | | // FoldingSetBase Implementation |
226 | | |
// Out-of-line virtual method; anchors the class's vtable to this translation
// unit (LLVM's customary anchor() idiom).
void FoldingSetBase::anchor() {}
228 | | |
229 | 588 | FoldingSetBase::FoldingSetBase(unsigned Log2InitSize) { |
230 | 588 | assert(5 < Log2InitSize && Log2InitSize < 32 && |
231 | 588 | "Initial hash table size out of range"); |
232 | 588 | NumBuckets = 1 << Log2InitSize; |
233 | 588 | Buckets = AllocateBuckets(NumBuckets); |
234 | 588 | NumNodes = 0; |
235 | 588 | } |
236 | | |
237 | | FoldingSetBase::FoldingSetBase(FoldingSetBase &&Arg) |
238 | 0 | : Buckets(Arg.Buckets), NumBuckets(Arg.NumBuckets), NumNodes(Arg.NumNodes) { |
239 | 0 | Arg.Buckets = nullptr; |
240 | 0 | Arg.NumBuckets = 0; |
241 | 0 | Arg.NumNodes = 0; |
242 | 0 | } |
243 | | |
244 | 0 | FoldingSetBase &FoldingSetBase::operator=(FoldingSetBase &&RHS) { |
245 | 0 | free(Buckets); // This may be null if the set is in a moved-from state. |
246 | 0 | Buckets = RHS.Buckets; |
247 | 0 | NumBuckets = RHS.NumBuckets; |
248 | 0 | NumNodes = RHS.NumNodes; |
249 | 0 | RHS.Buckets = nullptr; |
250 | 0 | RHS.NumBuckets = 0; |
251 | 0 | RHS.NumNodes = 0; |
252 | 0 | return *this; |
253 | 0 | } |
254 | | |
FoldingSetBase::~FoldingSetBase() {
  // Buckets may be null if this set was moved from; free(nullptr) is a no-op.
  // The nodes themselves are not freed here — the set never allocated them.
  free(Buckets);
}
258 | | |
259 | 0 | void FoldingSetBase::clear() { |
260 | | // Set all but the last bucket to null pointers. |
261 | 0 | memset(Buckets, 0, NumBuckets*sizeof(void*)); |
262 | | |
263 | | // Set the very last bucket to be a non-null "pointer". |
264 | 0 | Buckets[NumBuckets] = reinterpret_cast<void*>(-1); |
265 | | |
266 | | // Reset the node count to zero. |
267 | 0 | NumNodes = 0; |
268 | 0 | } |
269 | | |
/// GrowBucketCount - Resize the table to NewBucketCount buckets (a power of
/// two strictly greater than the current count) and rehash every node into
/// its new bucket.
void FoldingSetBase::GrowBucketCount(unsigned NewBucketCount) {
  assert((NewBucketCount > NumBuckets) && "Can't shrink a folding set with GrowBucketCount");
  assert(isPowerOf2_32(NewBucketCount) && "Bad bucket count!");
  void **OldBuckets = Buckets;
  unsigned OldNumBuckets = NumBuckets;

  // Clear out new buckets.
  Buckets = AllocateBuckets(NewBucketCount);
  // Set NumBuckets only if allocation of new buckets was successful.
  NumBuckets = NewBucketCount;
  // NumNodes is rebuilt below: InsertNode re-increments it for every node
  // moved into the new table.
  NumNodes = 0;

  // Walk the old buckets, rehashing nodes into their new place.
  FoldingSetNodeID TempID;
  for (unsigned i = 0; i != OldNumBuckets; ++i) {
    void *Probe = OldBuckets[i];
    if (!Probe) continue;
    // GetNextPtr returns null once Probe is the tagged pointer back to the
    // bucket head, i.e. when the chain is exhausted.
    while (Node *NodeInBucket = GetNextPtr(Probe)) {
      // Figure out the next link, remove NodeInBucket from the old link.
      Probe = NodeInBucket->getNextInBucket();
      NodeInBucket->SetNextInBucket(nullptr);

      // Insert the node into the new bucket, after recomputing the hash.
      InsertNode(NodeInBucket,
                 GetBucketFor(ComputeNodeHash(NodeInBucket, TempID),
                              Buckets, NumBuckets));
      // TempID is scratch space reused across iterations; reset it.
      TempID.clear();
    }
  }

  free(OldBuckets);
}
302 | | |
303 | | /// GrowHashTable - Double the size of the hash table and rehash everything. |
304 | | /// |
305 | 151 | void FoldingSetBase::GrowHashTable() { |
306 | 151 | GrowBucketCount(NumBuckets * 2); |
307 | 151 | } |
308 | | |
309 | 0 | void FoldingSetBase::reserve(unsigned EltCount) { |
310 | | // This will give us somewhere between EltCount / 2 and |
311 | | // EltCount buckets. This puts us in the load factor |
312 | | // range of 1.0 - 2.0. |
313 | 0 | if(EltCount < capacity()) |
314 | 0 | return; |
315 | 0 | GrowBucketCount(PowerOf2Floor(EltCount)); |
316 | 0 | } |
317 | | |
/// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
/// return it. If not, return the insertion token that will make insertion
/// faster.
FoldingSetBase::Node *
FoldingSetBase::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                    void *&InsertPos) {
  unsigned IDHash = ID.ComputeHash();
  void **Bucket = GetBucketFor(IDHash, Buckets, NumBuckets);
  void *Probe = *Bucket;

  InsertPos = nullptr;

  // Walk this bucket's chain; GetNextPtr yields null when Probe becomes the
  // tagged pointer back to the bucket head (end of chain) or the bucket was
  // empty (null).
  FoldingSetNodeID TempID;
  while (Node *NodeInBucket = GetNextPtr(Probe)) {
    // NodeEquals may use TempID as scratch space when re-profiling the node.
    if (NodeEquals(NodeInBucket, ID, IDHash, TempID))
      return NodeInBucket;
    TempID.clear();

    Probe = NodeInBucket->getNextInBucket();
  }

  // Didn't find the node, return null with the bucket as the InsertPos.
  InsertPos = Bucket;
  return nullptr;
}
343 | | |
/// InsertNode - Insert the specified node into the folding set, knowing that it
/// is not already in the map. InsertPos must be obtained from
/// FindNodeOrInsertPos.
void FoldingSetBase::InsertNode(Node *N, void *InsertPos) {
  // A node being inserted must not already be linked into a bucket.
  assert(!N->getNextInBucket());
  // Do we need to grow the hashtable?
  if (NumNodes+1 > capacity()) {
    GrowHashTable();
    // Growing rehashed everything, so the caller's InsertPos is now stale;
    // recompute the destination bucket for N.
    FoldingSetNodeID TempID;
    InsertPos = GetBucketFor(ComputeNodeHash(N, TempID), Buckets, NumBuckets);
  }

  ++NumNodes;

  /// The insert position is actually a bucket pointer.
  void **Bucket = static_cast<void**>(InsertPos);

  void *Next = *Bucket;

  // If this is the first insertion into this bucket, its next pointer will be
  // null. Pretend as if it pointed to itself, setting the low bit to indicate
  // that it is a pointer to the bucket.
  if (!Next)
    Next = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(Bucket)|1);

  // Set the node's next pointer, and make the bucket point to the node.
  N->SetNextInBucket(Next);
  *Bucket = N;
}
373 | | |
/// RemoveNode - Remove a node from the folding set, returning true if one was
/// removed or false if the node was not in the folding set.
bool FoldingSetBase::RemoveNode(Node *N) {
  // Because each bucket is a circular list, we don't need to compute N's hash
  // to remove it.
  void *Ptr = N->getNextInBucket();
  if (!Ptr) return false;  // Not in folding set.

  --NumNodes;
  N->SetNextInBucket(nullptr);

  // Remember what N originally pointed to, either a bucket or another node.
  void *NodeNextPtr = Ptr;

  // Chase around the list until we find the node (or bucket) which points to N.
  // Termination is guaranteed because the chain is circular: starting from
  // N's own next link, we must come back around to the link pointing at N.
  while (true) {
    if (Node *NodeInBucket = GetNextPtr(Ptr)) {
      // Advance pointer.
      Ptr = NodeInBucket->getNextInBucket();

      // We found a node that points to N, change it to point to N's next node,
      // removing N from the list.
      if (Ptr == N) {
        NodeInBucket->SetNextInBucket(NodeNextPtr);
        return true;
      }
    } else {
      // Ptr is the tagged pointer back to the bucket head; continue the walk
      // from the bucket's first entry.
      void **Bucket = GetBucketPtr(Ptr);
      Ptr = *Bucket;

      // If we found that the bucket points to N, update the bucket to point to
      // whatever is next.
      if (Ptr == N) {
        *Bucket = NodeNextPtr;
        return true;
      }
    }
  }
}
413 | | |
414 | | /// GetOrInsertNode - If there is an existing simple Node exactly |
415 | | /// equal to the specified node, return it. Otherwise, insert 'N' and it |
416 | | /// instead. |
417 | 0 | FoldingSetBase::Node *FoldingSetBase::GetOrInsertNode(FoldingSetBase::Node *N) { |
418 | 0 | FoldingSetNodeID ID; |
419 | 0 | GetNodeProfile(N, ID); |
420 | 0 | void *IP; |
421 | 0 | if (Node *E = FindNodeOrInsertPos(ID, IP)) |
422 | 0 | return E; |
423 | 0 | InsertNode(N, IP); |
424 | 0 | return N; |
425 | 0 | } |
426 | | |
427 | | //===----------------------------------------------------------------------===// |
428 | | // FoldingSetIteratorImpl Implementation |
429 | | |
/// Construct an iterator positioned on the first node at or after 'Bucket'.
/// If no populated bucket remains, NodePtr ends up on the (void*)-1
/// end-of-table sentinel, which serves as the end iterator.
FoldingSetIteratorImpl::FoldingSetIteratorImpl(void **Bucket) {
  // Skip to the first non-null non-self-cycle bucket.
  while (*Bucket != reinterpret_cast<void*>(-1) &&
         (!*Bucket || !GetNextPtr(*Bucket)))
    ++Bucket;

  NodePtr = static_cast<FoldingSetNode*>(*Bucket);
}
438 | | |
/// advance - Step to the next node: first any further node in the current
/// bucket's chain, otherwise the head of the next populated bucket (or the
/// end-of-table sentinel).
void FoldingSetIteratorImpl::advance() {
  // If there is another link within this bucket, go to it.
  void *Probe = NodePtr->getNextInBucket();

  if (FoldingSetNode *NextNodeInBucket = GetNextPtr(Probe))
    NodePtr = NextNodeInBucket;
  else {
    // Otherwise, this is the last link in this bucket: Probe is the tagged
    // pointer back to the bucket head.
    void **Bucket = GetBucketPtr(Probe);

    // Skip to the next non-null non-self-cycle bucket.
    do {
      ++Bucket;
    } while (*Bucket != reinterpret_cast<void*>(-1) &&
             (!*Bucket || !GetNextPtr(*Bucket)));

    // *Bucket is either the first node of the next populated bucket or the
    // (void*)-1 sentinel marking the end iterator.
    NodePtr = static_cast<FoldingSetNode*>(*Bucket);
  }
}
458 | | |
459 | | //===----------------------------------------------------------------------===// |
460 | | // FoldingSetBucketIteratorImpl Implementation |
461 | | |
462 | 0 | FoldingSetBucketIteratorImpl::FoldingSetBucketIteratorImpl(void **Bucket) { |
463 | 0 | Ptr = (!*Bucket || !GetNextPtr(*Bucket)) ? (void*) Bucket : *Bucket; |
464 | 0 | } |