/home/arjun/llvm-project/llvm/lib/Support/FoldingSet.cpp
Line | Count | Source |
1 | | //===-- Support/FoldingSet.cpp - Uniquing Hash Set --------------*- C++ -*-===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This file implements a hash set that can be used to remove duplication of |
10 | | // nodes in a graph. |
11 | | // |
12 | | //===----------------------------------------------------------------------===// |
13 | | |
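
For orientation, a typical client of this file looks roughly like the sketch below, using the public FoldingSet API declared in llvm/ADT/FoldingSet.h (Profile, FindNodeOrInsertPos, InsertNode). The node type MyNode and its fields Kind and Operand are hypothetical illustration names, not part of LLVM.

    // Sketch only: MyNode, Kind, and Operand are made-up names.
    #include "llvm/ADT/FoldingSet.h"

    struct MyNode : llvm::FoldingSetNode {
      unsigned Kind;
      void *Operand;
      MyNode(unsigned K, void *Op) : Kind(K), Operand(Op) {}
      // Describe the node's identity; nodes with equal profiles fold together.
      void Profile(llvm::FoldingSetNodeID &ID) const {
        ID.AddInteger(Kind);
        ID.AddPointer(Operand);
      }
    };

    MyNode *getUniqued(llvm::FoldingSet<MyNode> &Set, unsigned K, void *Op) {
      llvm::FoldingSetNodeID ID;
      ID.AddInteger(K);
      ID.AddPointer(Op);
      void *InsertPos = nullptr;
      if (MyNode *Existing = Set.FindNodeOrInsertPos(ID, InsertPos))
        return Existing;               // duplicate: reuse the existing node
      MyNode *N = new MyNode(K, Op);   // allocation policy is the caller's
      Set.InsertNode(N, InsertPos);    // insert at the probed position
      return N;
    }
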
14 | | #include "llvm/ADT/FoldingSet.h" |
15 | | #include "llvm/ADT/Hashing.h" |
16 | | #include "llvm/ADT/StringRef.h" |
17 | | #include "llvm/Support/Allocator.h" |
18 | | #include "llvm/Support/ErrorHandling.h" |
19 | | #include "llvm/Support/Host.h" |
20 | | #include "llvm/Support/MathExtras.h" |
21 | | #include <cassert> |
22 | | #include <cstring> |
23 | | using namespace llvm; |
24 | | |
25 | | //===----------------------------------------------------------------------===// |
26 | | // FoldingSetNodeIDRef Implementation |
27 | | |
28 | | /// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef, |
29 | | /// used to look up the node in the FoldingSetBase. |
30 | 0 | unsigned FoldingSetNodeIDRef::ComputeHash() const { |
31 | 0 | return static_cast<unsigned>(hash_combine_range(Data, Data+Size)); |
32 | 0 | } |
33 | | |
34 | 0 | bool FoldingSetNodeIDRef::operator==(FoldingSetNodeIDRef RHS) const { |
35 | 0 | if (Size != RHS.Size) return false; |
36 | 0 | return memcmp(Data, RHS.Data, Size*sizeof(*Data)) == 0; |
37 | 0 | } |
38 | | |
39 | | /// Used to compare the "ordering" of two nodes, as defined by the |
40 | | /// profiled bits and the ordering memcmp() imposes on them. |
41 | 0 | bool FoldingSetNodeIDRef::operator<(FoldingSetNodeIDRef RHS) const { |
42 | 0 | if (Size != RHS.Size) |
43 | 0 | return Size < RHS.Size; |
44 | 0 | return memcmp(Data, RHS.Data, Size*sizeof(*Data)) < 0; |
45 | 0 | } |
46 | | |
47 | | //===----------------------------------------------------------------------===// |
48 | | // FoldingSetNodeID Implementation |
49 | | |
50 | | /// Add* - Add various data types to Bit data. |
51 | | /// |
52 | 0 | void FoldingSetNodeID::AddPointer(const void *Ptr) { |
53 | 0 | // Note: this adds pointers to the hash using sizes and endianness that |
54 | 0 | // depend on the host. It doesn't matter, however, because hashing on |
55 | 0 | // pointer values is inherently unstable. Nothing should depend on the |
56 | 0 | // ordering of nodes in the folding set. |
57 | 0 | static_assert(sizeof(uintptr_t) <= sizeof(unsigned long long), |
58 | 0 | "unexpected pointer size"); |
59 | 0 | AddInteger(reinterpret_cast<uintptr_t>(Ptr)); |
60 | 0 | } |
61 | 0 | void FoldingSetNodeID::AddInteger(signed I) { |
62 | 0 | Bits.push_back(I); |
63 | 0 | } |
64 | 0 | void FoldingSetNodeID::AddInteger(unsigned I) { |
65 | 0 | Bits.push_back(I); |
66 | 0 | } |
67 | 0 | void FoldingSetNodeID::AddInteger(long I) { |
68 | 0 | AddInteger((unsigned long)I); |
69 | 0 | } |
70 | 0 | void FoldingSetNodeID::AddInteger(unsigned long I) { |
71 | 0 | if (sizeof(long) == sizeof(int)) |
72 | 0 | AddInteger(unsigned(I)); |
73 | 0 | else if (sizeof(long) == sizeof(long long)) { |
74 | 0 | AddInteger((unsigned long long)I); |
75 | 0 | } else { |
76 | 0 | llvm_unreachable("unexpected sizeof(long)"); |
77 | 0 | } |
78 | 0 | } |
79 | 0 | void FoldingSetNodeID::AddInteger(long long I) { |
80 | 0 | AddInteger((unsigned long long)I); |
81 | 0 | } |
82 | 0 | void FoldingSetNodeID::AddInteger(unsigned long long I) { |
83 | 0 | AddInteger(unsigned(I)); |
84 | 0 | AddInteger(unsigned(I >> 32)); |
85 | 0 | } |
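
A quick standalone illustration of the overload above: a 64-bit value is recorded as two 32-bit words, low half first. This mirrors AddInteger(unsigned long long) but is not part of FoldingSet.

    // Illustration only: how a 64-bit integer lands in the Bits vector.
    #include <cassert>
    #include <vector>

    int main() {
      std::vector<unsigned> Bits;
      unsigned long long I = 0x1122334455667788ULL;
      Bits.push_back(unsigned(I));        // low 32 bits:  0x55667788
      Bits.push_back(unsigned(I >> 32));  // high 32 bits: 0x11223344
      assert(Bits[0] == 0x55667788u && Bits[1] == 0x11223344u);
    }
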
86 | | |
87 | 0 | void FoldingSetNodeID::AddString(StringRef String) { |
88 | 0 | unsigned Size = String.size(); |
89 | 0 | Bits.push_back(Size); |
90 | 0 | if (!Size) return; |
91 | 0 | |
92 | 0 | unsigned Units = Size / 4; |
93 | 0 | unsigned Pos = 0; |
94 | 0 | const unsigned *Base = (const unsigned*) String.data(); |
95 | 0 | |
96 | 0 | // If the string is aligned, do a bulk transfer. |
97 | 0 | if (!((intptr_t)Base & 3)) { |
98 | 0 | Bits.append(Base, Base + Units); |
99 | 0 | Pos = (Units + 1) * 4; |
100 | 0 | } else { |
101 | 0 | // Otherwise do it the hard way. |
102 | 0 | // To be compatible with above bulk transfer, we need to take endianness |
103 | 0 | // into account. |
104 | 0 | static_assert(sys::IsBigEndianHost || sys::IsLittleEndianHost, |
105 | 0 | "Unexpected host endianness"); |
106 | 0 | if (sys::IsBigEndianHost) { |
107 | 0 | for (Pos += 4; Pos <= Size; Pos += 4) { |
108 | 0 | unsigned V = ((unsigned char)String[Pos - 4] << 24) | |
109 | 0 | ((unsigned char)String[Pos - 3] << 16) | |
110 | 0 | ((unsigned char)String[Pos - 2] << 8) | |
111 | 0 | (unsigned char)String[Pos - 1]; |
112 | 0 | Bits.push_back(V); |
113 | 0 | } |
114 | 0 | } else { // Little-endian host |
115 | 0 | for (Pos += 4; Pos <= Size; Pos += 4) { |
116 | 0 | unsigned V = ((unsigned char)String[Pos - 1] << 24) | |
117 | 0 | ((unsigned char)String[Pos - 2] << 16) | |
118 | 0 | ((unsigned char)String[Pos - 3] << 8) | |
119 | 0 | (unsigned char)String[Pos - 4]; |
120 | 0 | Bits.push_back(V); |
121 | 0 | } |
122 | 0 | } |
123 | 0 | } |
124 | 0 | |
125 | 0 | // Pack any leftover bytes (at most three). |
126 | 0 | unsigned V = 0; |
127 | 0 | // Pos will have overshot size by 4 - #bytes left over. |
128 | 0 | // No need to take endianness into account here - this is always executed. |
129 | 0 | switch (Pos - Size) { |
130 | 0 | case 1: V = (V << 8) | (unsigned char)String[Size - 3]; LLVM_FALLTHROUGH; |
131 | 0 | case 2: V = (V << 8) | (unsigned char)String[Size - 2]; LLVM_FALLTHROUGH; |
132 | 0 | case 3: V = (V << 8) | (unsigned char)String[Size - 1]; break; |
133 | 0 | default: return; // Nothing left. |
134 | 0 | } |
135 | 0 | |
136 | 0 | Bits.push_back(V); |
137 | 0 | } |
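
As a sanity check of the endianness handling above: on a little-endian host with 32-bit unsigned, the byte-shift packing used for unaligned strings yields the same word the aligned bulk transfer would copy. Standalone sketch, not part of FoldingSet:

    // Illustration only: unaligned little-endian packing vs. a raw word copy.
    #include <cassert>
    #include <cstring>

    int main() {
      const unsigned char S[4] = {'a', 'b', 'c', 'd'};
      unsigned Bulk;
      std::memcpy(&Bulk, S, sizeof(Bulk));   // what Bits.append() would copy
      unsigned Packed = ((unsigned)S[3] << 24) | ((unsigned)S[2] << 16) |
                        ((unsigned)S[1] << 8) | (unsigned)S[0];
      assert(Bulk == Packed);                // holds on little-endian hosts
    }
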
138 | | |
139 | | // AddNodeID - Adds the Bit data of another ID to *this. |
140 | 0 | void FoldingSetNodeID::AddNodeID(const FoldingSetNodeID &ID) { |
141 | 0 | Bits.append(ID.Bits.begin(), ID.Bits.end()); |
142 | 0 | } |
143 | | |
144 | | /// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used to |
145 | | /// look up the node in the FoldingSetBase. |
146 | 0 | unsigned FoldingSetNodeID::ComputeHash() const { |
147 | 0 | return FoldingSetNodeIDRef(Bits.data(), Bits.size()).ComputeHash(); |
148 | 0 | } |
149 | | |
150 | | /// operator== - Used to compare two nodes to each other. |
151 | | /// |
152 | 0 | bool FoldingSetNodeID::operator==(const FoldingSetNodeID &RHS) const { |
153 | 0 | return *this == FoldingSetNodeIDRef(RHS.Bits.data(), RHS.Bits.size()); |
154 | 0 | } |
155 | | |
156 | | /// operator== - Used to compare two nodes to each other. |
157 | | /// |
158 | 0 | bool FoldingSetNodeID::operator==(FoldingSetNodeIDRef RHS) const { |
159 | 0 | return FoldingSetNodeIDRef(Bits.data(), Bits.size()) == RHS; |
160 | 0 | } |
161 | | |
162 | | /// Used to compare the "ordering" of two nodes, as defined by the |
163 | | /// profiled bits and the ordering memcmp() imposes on them. |
164 | 0 | bool FoldingSetNodeID::operator<(const FoldingSetNodeID &RHS) const { |
165 | 0 | return *this < FoldingSetNodeIDRef(RHS.Bits.data(), RHS.Bits.size()); |
166 | 0 | } |
167 | | |
168 | 0 | bool FoldingSetNodeID::operator<(FoldingSetNodeIDRef RHS) const { |
169 | 0 | return FoldingSetNodeIDRef(Bits.data(), Bits.size()) < RHS; |
170 | 0 | } |
171 | | |
172 | | /// Intern - Copy this node's data to a memory region allocated from the |
173 | | /// given allocator and return a FoldingSetNodeIDRef describing the |
174 | | /// interned data. |
175 | | FoldingSetNodeIDRef |
176 | 0 | FoldingSetNodeID::Intern(BumpPtrAllocator &Allocator) const { |
177 | 0 | unsigned *New = Allocator.Allocate<unsigned>(Bits.size()); |
178 | 0 | std::uninitialized_copy(Bits.begin(), Bits.end(), New); |
179 | 0 | return FoldingSetNodeIDRef(New, Bits.size()); |
180 | 0 | } |
181 | | |
182 | | //===----------------------------------------------------------------------===// |
183 | | /// Helper functions for FoldingSetBase. |
184 | | |
185 | | /// GetNextPtr - In order to save space, each bucket is a |
186 | | /// singly-linked-list. In order to make deletion more efficient, we make |
187 | | /// the list circular, so we can delete a node without computing its hash. |
188 | | /// The problem with this is that the heads of the hash buckets are not |
189 | | /// Nodes. If NextInBucketPtr is a bucket pointer, this method returns null: |
190 | | /// use GetBucketPtr when this happens. |
191 | 0 | static FoldingSetBase::Node *GetNextPtr(void *NextInBucketPtr) { |
192 | 0 | // The low bit is set if this is the pointer back to the bucket. |
193 | 0 | if (reinterpret_cast<intptr_t>(NextInBucketPtr) & 1) |
194 | 0 | return nullptr; |
195 | 0 | |
196 | 0 | return static_cast<FoldingSetBase::Node*>(NextInBucketPtr); |
197 | 0 | } |
198 | | |
199 | | /// GetBucketPtr - Provides a casting of a bucket pointer for isNode |
200 | | /// testing. |
201 | 0 | static void **GetBucketPtr(void *NextInBucketPtr) { |
202 | 0 | intptr_t Ptr = reinterpret_cast<intptr_t>(NextInBucketPtr); |
203 | 0 | assert((Ptr & 1) && "Not a bucket pointer"); |
204 | 0 | return reinterpret_cast<void**>(Ptr & ~intptr_t(1)); |
205 | 0 | } |
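
A minimal sketch of the tagging scheme these two helpers rely on: a pointer back to the bucket is stored with its low bit set (buckets are pointer-aligned, so that bit is otherwise unused), while node pointers keep the bit clear. Standalone illustration, not part of FoldingSet:

    // Tag a bucket address, detect the tag, and recover the original pointer.
    #include <cassert>
    #include <cstdint>

    int main() {
      void *Bucket[1] = {nullptr};
      void *Tagged = reinterpret_cast<void *>(
          reinterpret_cast<intptr_t>(Bucket) | 1);            // back-pointer
      assert(reinterpret_cast<intptr_t>(Tagged) & 1);         // GetNextPtr -> null
      void **Recovered = reinterpret_cast<void **>(
          reinterpret_cast<intptr_t>(Tagged) & ~intptr_t(1)); // GetBucketPtr
      assert(Recovered == Bucket);
    }
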
206 | | |
207 | | /// GetBucketFor - Hash the specified node ID and return the hash bucket for |
208 | | /// the specified ID. |
209 | 0 | static void **GetBucketFor(unsigned Hash, void **Buckets, unsigned NumBuckets) { |
210 | 0 | // NumBuckets is always a power of 2. |
211 | 0 | unsigned BucketNum = Hash & (NumBuckets-1); |
212 | 0 | return Buckets + BucketNum; |
213 | 0 | } |
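
Worked check of the masking above: because NumBuckets is a power of two, Hash & (NumBuckets - 1) equals Hash % NumBuckets. Standalone illustration:

    // Illustration only: power-of-two masking versus modulo.
    #include <cassert>

    int main() {
      unsigned NumBuckets = 64;                // always a power of 2 here
      unsigned Hash = 0x1234ABCDu;
      assert((Hash & (NumBuckets - 1)) == Hash % NumBuckets);  // bucket 13
    }
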
214 | | |
215 | | /// AllocateBuckets - Allocate zero-initialized bucket memory. |
216 | 0 | static void **AllocateBuckets(unsigned NumBuckets) { |
217 | 0 | void **Buckets = static_cast<void**>(safe_calloc(NumBuckets + 1, |
218 | 0 | sizeof(void*))); |
219 | 0 | // Set the very last bucket to be a non-null "pointer". |
220 | 0 | Buckets[NumBuckets] = reinterpret_cast<void*>(-1); |
221 | 0 | return Buckets; |
222 | 0 | } |
223 | | |
224 | | //===----------------------------------------------------------------------===// |
225 | | // FoldingSetBase Implementation |
226 | | |
227 | 0 | FoldingSetBase::FoldingSetBase(unsigned Log2InitSize) { |
228 | 0 | assert(5 < Log2InitSize && Log2InitSize < 32 && |
229 | 0 | "Initial hash table size out of range"); |
230 | 0 | NumBuckets = 1 << Log2InitSize; |
231 | 0 | Buckets = AllocateBuckets(NumBuckets); |
232 | 0 | NumNodes = 0; |
233 | 0 | } |
234 | | |
235 | | FoldingSetBase::FoldingSetBase(FoldingSetBase &&Arg) |
236 | 0 | : Buckets(Arg.Buckets), NumBuckets(Arg.NumBuckets), NumNodes(Arg.NumNodes) { |
237 | 0 | Arg.Buckets = nullptr; |
238 | 0 | Arg.NumBuckets = 0; |
239 | 0 | Arg.NumNodes = 0; |
240 | 0 | } |
241 | | |
242 | 0 | FoldingSetBase &FoldingSetBase::operator=(FoldingSetBase &&RHS) { |
243 | 0 | free(Buckets); // This may be null if the set is in a moved-from state. |
244 | 0 | Buckets = RHS.Buckets; |
245 | 0 | NumBuckets = RHS.NumBuckets; |
246 | 0 | NumNodes = RHS.NumNodes; |
247 | 0 | RHS.Buckets = nullptr; |
248 | 0 | RHS.NumBuckets = 0; |
249 | 0 | RHS.NumNodes = 0; |
250 | 0 | return *this; |
251 | 0 | } |
252 | | |
253 | 0 | FoldingSetBase::~FoldingSetBase() { |
254 | 0 | free(Buckets); |
255 | 0 | } |
256 | | |
257 | 0 | void FoldingSetBase::clear() { |
258 | 0 | // Set all but the last bucket to null pointers. |
259 | 0 | memset(Buckets, 0, NumBuckets*sizeof(void*)); |
260 | 0 | |
261 | 0 | // Set the very last bucket to be a non-null "pointer". |
262 | 0 | Buckets[NumBuckets] = reinterpret_cast<void*>(-1); |
263 | 0 | |
264 | 0 | // Reset the node count to zero. |
265 | 0 | NumNodes = 0; |
266 | 0 | } |
267 | | |
268 | | void FoldingSetBase::GrowBucketCount(unsigned NewBucketCount, |
269 | 0 | const FoldingSetInfo &Info) { |
270 | 0 | assert((NewBucketCount > NumBuckets) && |
271 | 0 | "Can't shrink a folding set with GrowBucketCount"); |
272 | 0 | assert(isPowerOf2_32(NewBucketCount) && "Bad bucket count!"); |
273 | 0 | void **OldBuckets = Buckets; |
274 | 0 | unsigned OldNumBuckets = NumBuckets; |
275 | 0 | |
276 | 0 | // Clear out new buckets. |
277 | 0 | Buckets = AllocateBuckets(NewBucketCount); |
278 | 0 | // Set NumBuckets only if allocation of new buckets was successful. |
279 | 0 | NumBuckets = NewBucketCount; |
280 | 0 | NumNodes = 0; |
281 | 0 | |
282 | 0 | // Walk the old buckets, rehashing nodes into their new place. |
283 | 0 | FoldingSetNodeID TempID; |
284 | 0 | for (unsigned i = 0; i != OldNumBuckets; ++i) { |
285 | 0 | void *Probe = OldBuckets[i]; |
286 | 0 | if (!Probe) continue; |
287 | 0 | while (Node *NodeInBucket = GetNextPtr(Probe)) { |
288 | 0 | // Figure out the next link, remove NodeInBucket from the old link. |
289 | 0 | Probe = NodeInBucket->getNextInBucket(); |
290 | 0 | NodeInBucket->SetNextInBucket(nullptr); |
291 | 0 | |
292 | 0 | // Insert the node into the new bucket, after recomputing the hash. |
293 | 0 | InsertNode(NodeInBucket, |
294 | 0 | GetBucketFor(Info.ComputeNodeHash(this, NodeInBucket, TempID), |
295 | 0 | Buckets, NumBuckets), |
296 | 0 | Info); |
297 | 0 | TempID.clear(); |
298 | 0 | } |
299 | 0 | } |
300 | 0 | |
301 | 0 | free(OldBuckets); |
302 | 0 | } |
303 | | |
304 | | /// GrowHashTable - Double the size of the hash table and rehash everything. |
305 | | /// |
306 | 0 | void FoldingSetBase::GrowHashTable(const FoldingSetInfo &Info) { |
307 | 0 | GrowBucketCount(NumBuckets * 2, Info); |
308 | 0 | } |
309 | | |
310 | 0 | void FoldingSetBase::reserve(unsigned EltCount, const FoldingSetInfo &Info) { |
311 | 0 | // This will give us somewhere between EltCount / 2 and |
312 | 0 | // EltCount buckets. This puts us in the load factor |
313 | 0 | // range of 1.0 - 2.0. |
314 | 0 | if (EltCount < capacity()) |
315 | 0 | return; |
316 | 0 | GrowBucketCount(PowerOf2Floor(EltCount), Info); |
317 | 0 | } |
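
A worked check of the sizing comment above, assuming capacity() is NumBuckets * 2 as declared in llvm/ADT/FoldingSet.h: reserving 100 elements floors to 64 buckets, which lies between EltCount / 2 and EltCount and leaves the load factor in the 1.0 - 2.0 range.

    // Illustration only; assumes capacity() == NumBuckets * 2 (FoldingSet.h).
    #include <cassert>

    int main() {
      unsigned EltCount = 100;
      unsigned NewBuckets = 64;                 // PowerOf2Floor(100)
      assert(EltCount / 2 <= NewBuckets && NewBuckets <= EltCount);
      assert(NewBuckets * 2 >= EltCount);       // capacity 128: no regrow needed
      // Load factor after EltCount insertions: 100.0 / 64 = 1.5625.
    }
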
318 | | |
319 | | /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists, |
320 | | /// return it. If not, return null and set InsertPos to an insertion token |
321 | | /// that will make insertion faster. |
322 | | FoldingSetBase::Node *FoldingSetBase::FindNodeOrInsertPos( |
323 | 0 | const FoldingSetNodeID &ID, void *&InsertPos, const FoldingSetInfo &Info) { |
324 | 0 | unsigned IDHash = ID.ComputeHash(); |
325 | 0 | void **Bucket = GetBucketFor(IDHash, Buckets, NumBuckets); |
326 | 0 | void *Probe = *Bucket; |
327 | 0 | |
328 | 0 | InsertPos = nullptr; |
329 | 0 | |
330 | 0 | FoldingSetNodeID TempID; |
331 | 0 | while (Node *NodeInBucket = GetNextPtr(Probe)) { |
332 | 0 | if (Info.NodeEquals(this, NodeInBucket, ID, IDHash, TempID)) |
333 | 0 | return NodeInBucket; |
334 | 0 | TempID.clear(); |
335 | 0 | |
336 | 0 | Probe = NodeInBucket->getNextInBucket(); |
337 | 0 | } |
338 | 0 | |
339 | 0 | // Didn't find the node, return null with the bucket as the InsertPos. |
340 | 0 | InsertPos = Bucket; |
341 | 0 | return nullptr; |
342 | 0 | } |
343 | | |
344 | | /// InsertNode - Insert the specified node into the folding set, knowing that it |
345 | | /// is not already in the map. InsertPos must be obtained from |
346 | | /// FindNodeOrInsertPos. |
347 | | void FoldingSetBase::InsertNode(Node *N, void *InsertPos, |
348 | 0 | const FoldingSetInfo &Info) { |
349 | 0 | assert(!N->getNextInBucket()); |
350 | 0 | // Do we need to grow the hashtable? |
351 | 0 | if (NumNodes+1 > capacity()) { |
352 | 0 | GrowHashTable(Info); |
353 | 0 | FoldingSetNodeID TempID; |
354 | 0 | InsertPos = GetBucketFor(Info.ComputeNodeHash(this, N, TempID), Buckets, |
355 | 0 | NumBuckets); |
356 | 0 | } |
357 | 0 | |
358 | 0 | ++NumNodes; |
359 | 0 | |
360 | 0 | /// The insert position is actually a bucket pointer. |
361 | 0 | void **Bucket = static_cast<void**>(InsertPos); |
362 | 0 | |
363 | 0 | void *Next = *Bucket; |
364 | 0 | |
365 | 0 | // If this is the first insertion into this bucket, its next pointer will be |
366 | 0 | // null. Pretend as if it pointed to itself, setting the low bit to indicate |
367 | 0 | // that it is a pointer to the bucket. |
368 | 0 | if (!Next) |
369 | 0 | Next = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(Bucket)|1); |
370 | 0 | |
371 | 0 | // Set the node's next pointer, and make the bucket point to the node. |
372 | 0 | N->SetNextInBucket(Next); |
373 | 0 | *Bucket = N; |
374 | 0 | } |
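
A small standalone sketch of the invariant InsertNode establishes: after the first insertion into a bucket the chain is circular, i.e. the last node's next pointer is the bucket address with its low bit set. FakeNode below is a stand-in for FoldingSetNode, for illustration only.

    // Illustration of the circular-bucket invariant after a first insertion.
    #include <cassert>
    #include <cstdint>

    struct FakeNode { void *NextInBucket; };

    int main() {
      void *Bucket[1] = {nullptr};
      FakeNode N;
      void *Next = Bucket[0];
      if (!Next)                               // first insertion: point back
        Next = reinterpret_cast<void *>(reinterpret_cast<intptr_t>(Bucket) | 1);
      N.NextInBucket = Next;
      Bucket[0] = &N;
      // The walk from the bucket head ends at the tagged back-pointer.
      assert(reinterpret_cast<intptr_t>(N.NextInBucket) & 1);
      assert((reinterpret_cast<intptr_t>(N.NextInBucket) & ~intptr_t(1)) ==
             reinterpret_cast<intptr_t>(Bucket));
    }
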
375 | | |
376 | | /// RemoveNode - Remove a node from the folding set, returning true if one was |
377 | | /// removed or false if the node was not in the folding set. |
378 | 0 | bool FoldingSetBase::RemoveNode(Node *N) { |
379 | 0 | // Because each bucket is a circular list, we don't need to compute N's hash |
380 | 0 | // to remove it. |
381 | 0 | void *Ptr = N->getNextInBucket(); |
382 | 0 | if (!Ptr) return false; // Not in folding set. |
383 | 0 | |
384 | 0 | --NumNodes; |
385 | 0 | N->SetNextInBucket(nullptr); |
386 | 0 | |
387 | 0 | // Remember what N originally pointed to, either a bucket or another node. |
388 | 0 | void *NodeNextPtr = Ptr; |
389 | 0 | |
390 | 0 | // Chase around the list until we find the node (or bucket) which points to N. |
391 | 0 | while (true) { |
392 | 0 | if (Node *NodeInBucket = GetNextPtr(Ptr)) { |
393 | 0 | // Advance pointer. |
394 | 0 | Ptr = NodeInBucket->getNextInBucket(); |
395 | 0 | |
396 | 0 | // We found a node that points to N, change it to point to N's next node, |
397 | 0 | // removing N from the list. |
398 | 0 | if (Ptr == N) { |
399 | 0 | NodeInBucket->SetNextInBucket(NodeNextPtr); |
400 | 0 | return true; |
401 | 0 | } |
402 | 0 | } else { |
403 | 0 | void **Bucket = GetBucketPtr(Ptr); |
404 | 0 | Ptr = *Bucket; |
405 | 0 | |
406 | 0 | // If we found that the bucket points to N, update the bucket to point to |
407 | 0 | // whatever is next. |
408 | 0 | if (Ptr == N) { |
409 | 0 | *Bucket = NodeNextPtr; |
410 | 0 | return true; |
411 | 0 | } |
412 | 0 | } |
413 | 0 | } |
414 | 0 | } |
415 | | |
416 | | /// GetOrInsertNode - If there is an existing simple Node exactly |
417 | | /// equal to the specified node, return it. Otherwise, insert 'N' and return it |
418 | | /// instead. |
419 | | FoldingSetBase::Node * |
420 | | FoldingSetBase::GetOrInsertNode(FoldingSetBase::Node *N, |
421 | 0 | const FoldingSetInfo &Info) { |
422 | 0 | FoldingSetNodeID ID; |
423 | 0 | Info.GetNodeProfile(this, N, ID); |
424 | 0 | void *IP; |
425 | 0 | if (Node *E = FindNodeOrInsertPos(ID, IP, Info)) |
426 | 0 | return E; |
427 | 0 | InsertNode(N, IP, Info); |
428 | 0 | return N; |
429 | 0 | } |
430 | | |
431 | | //===----------------------------------------------------------------------===// |
432 | | // FoldingSetIteratorImpl Implementation |
433 | | |
434 | 0 | FoldingSetIteratorImpl::FoldingSetIteratorImpl(void **Bucket) { |
435 | 0 | // Skip to the first non-null non-self-cycle bucket. |
436 | 0 | while (*Bucket != reinterpret_cast<void*>(-1) && |
437 | 0 | (!*Bucket || !GetNextPtr(*Bucket))) |
438 | 0 | ++Bucket; |
439 | 0 | |
440 | 0 | NodePtr = static_cast<FoldingSetNode*>(*Bucket); |
441 | 0 | } |
442 | | |
443 | 0 | void FoldingSetIteratorImpl::advance() { |
444 | 0 | // If there is another link within this bucket, go to it. |
445 | 0 | void *Probe = NodePtr->getNextInBucket(); |
446 | 0 | |
447 | 0 | if (FoldingSetNode *NextNodeInBucket = GetNextPtr(Probe)) |
448 | 0 | NodePtr = NextNodeInBucket; |
449 | 0 | else { |
450 | 0 | // Otherwise, this is the last link in this bucket. |
451 | 0 | void **Bucket = GetBucketPtr(Probe); |
452 | 0 | |
453 | 0 | // Skip to the next non-null non-self-cycle bucket. |
454 | 0 | do { |
455 | 0 | ++Bucket; |
456 | 0 | } while (*Bucket != reinterpret_cast<void*>(-1) && |
457 | 0 | (!*Bucket || !GetNextPtr(*Bucket))); |
458 | 0 | |
459 | 0 | NodePtr = static_cast<FoldingSetNode*>(*Bucket); |
460 | 0 | } |
461 | 0 | } |
462 | | |
463 | | //===----------------------------------------------------------------------===// |
464 | | // FoldingSetBucketIteratorImpl Implementation |
465 | | |
466 | 0 | FoldingSetBucketIteratorImpl::FoldingSetBucketIteratorImpl(void **Bucket) { |
467 | 0 | Ptr = (!*Bucket || !GetNextPtr(*Bucket)) ? (void*) Bucket : *Bucket; |
468 | 0 | } |