/src/mozilla-central/xpcom/ds/PLDHashTable.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim: set ts=8 sts=2 et sw=2 tw=80: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include <new> |
8 | | #include <stdio.h> |
9 | | #include <stdlib.h> |
10 | | #include <string.h> |
11 | | #include "PLDHashTable.h" |
12 | | #include "mozilla/HashFunctions.h" |
13 | | #include "mozilla/MathAlgorithms.h" |
14 | | #include "mozilla/OperatorNewExtensions.h" |
15 | | #include "nsAlgorithm.h" |
16 | | #include "nsPointerHashKeys.h" |
17 | | #include "mozilla/Likely.h" |
18 | | #include "mozilla/MemoryReporting.h" |
19 | | #include "mozilla/ChaosMode.h" |
20 | | |
21 | | using namespace mozilla; |
22 | | |
23 | | #ifdef DEBUG |
24 | | |
25 | | class AutoReadOp |
26 | | { |
27 | | Checker& mChk; |
28 | | public: |
29 | | explicit AutoReadOp(Checker& aChk) : mChk(aChk) { mChk.StartReadOp(); } |
30 | | ~AutoReadOp() { mChk.EndReadOp(); } |
31 | | }; |
32 | | |
33 | | class AutoWriteOp |
34 | | { |
35 | | Checker& mChk; |
36 | | public: |
37 | | explicit AutoWriteOp(Checker& aChk) : mChk(aChk) { mChk.StartWriteOp(); } |
38 | | ~AutoWriteOp() { mChk.EndWriteOp(); } |
39 | | }; |
40 | | |
41 | | class AutoIteratorRemovalOp |
42 | | { |
43 | | Checker& mChk; |
44 | | public: |
45 | | explicit AutoIteratorRemovalOp(Checker& aChk) |
46 | | : mChk(aChk) |
47 | | { |
48 | | mChk.StartIteratorRemovalOp(); |
49 | | } |
50 | | ~AutoIteratorRemovalOp() { mChk.EndIteratorRemovalOp(); } |
51 | | }; |
52 | | |
53 | | class AutoDestructorOp |
54 | | { |
55 | | Checker& mChk; |
56 | | public: |
57 | | explicit AutoDestructorOp(Checker& aChk) |
58 | | : mChk(aChk) |
59 | | { |
60 | | mChk.StartDestructorOp(); |
61 | | } |
62 | | ~AutoDestructorOp() { mChk.EndDestructorOp(); } |
63 | | }; |
64 | | |
65 | | #endif |
66 | | |
67 | | /* static */ PLDHashNumber |
68 | | PLDHashTable::HashStringKey(const void* aKey) |
69 | 0 | { |
70 | 0 | return HashString(static_cast<const char*>(aKey)); |
71 | 0 | } |
72 | | |
73 | | /* static */ PLDHashNumber |
74 | | PLDHashTable::HashVoidPtrKeyStub(const void* aKey) |
75 | 19.3M | { |
76 | 19.3M | return nsPtrHashKey<void>::HashKey(aKey); |
77 | 19.3M | } |
78 | | |
79 | | /* static */ bool |
80 | | PLDHashTable::MatchEntryStub(const PLDHashEntryHdr* aEntry, const void* aKey) |
81 | 3.24M | { |
82 | 3.24M | const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry; |
83 | 3.24M | |
84 | 3.24M | return stub->key == aKey; |
85 | 3.24M | } |
86 | | |
87 | | /* static */ bool |
88 | | PLDHashTable::MatchStringKey(const PLDHashEntryHdr* aEntry, const void* aKey) |
89 | 0 | { |
90 | 0 | const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry; |
91 | 0 |
|
92 | 0 | // XXX tolerate null keys on account of sloppy Mozilla callers. |
93 | 0 | return stub->key == aKey || |
94 | 0 | (stub->key && aKey && |
95 | 0 | strcmp((const char*)stub->key, (const char*)aKey) == 0); |
96 | 0 | } |
97 | | |
98 | | /* static */ void |
99 | | PLDHashTable::MoveEntryStub(PLDHashTable* aTable, |
100 | | const PLDHashEntryHdr* aFrom, |
101 | | PLDHashEntryHdr* aTo) |
102 | 9.76M | { |
103 | 9.76M | memcpy(aTo, aFrom, aTable->mEntrySize); |
104 | 9.76M | } |
105 | | |
106 | | /* static */ void |
107 | | PLDHashTable::ClearEntryStub(PLDHashTable* aTable, PLDHashEntryHdr* aEntry) |
108 | 6.22M | { |
109 | 6.22M | memset(aEntry, 0, aTable->mEntrySize); |
110 | 6.22M | } |
111 | | |
112 | | static const PLDHashTableOps gStubOps = { |
113 | | PLDHashTable::HashVoidPtrKeyStub, |
114 | | PLDHashTable::MatchEntryStub, |
115 | | PLDHashTable::MoveEntryStub, |
116 | | PLDHashTable::ClearEntryStub, |
117 | | nullptr |
118 | | }; |
119 | | |
120 | | /* static */ const PLDHashTableOps* |
121 | | PLDHashTable::StubOps() |
122 | 15 | { |
123 | 15 | return &gStubOps; |
124 | 15 | } |
125 | | |
126 | | static bool |
127 | | SizeOfEntryStore(uint32_t aCapacity, uint32_t aEntrySize, uint32_t* aNbytes) |
128 | 2.97k | { |
129 | 2.97k | uint64_t nbytes64 = uint64_t(aCapacity) * uint64_t(aEntrySize); |
130 | 2.97k | *aNbytes = aCapacity * aEntrySize; |
131 | 2.97k | return uint64_t(*aNbytes) == nbytes64; // returns false on overflow |
132 | 2.97k | } |
133 | | |
134 | | // Compute max and min load numbers (entry counts). We have a secondary max |
135 | | // that allows us to overload a table reasonably if it cannot be grown further |
136 | | // (i.e. if ChangeTable() fails). The table slows down drastically if the |
137 | | // secondary max is too close to 1, but 0.96875 gives only a slight slowdown |
138 | | // while allowing 1.3x more elements. |
139 | | static inline uint32_t |
140 | | MaxLoad(uint32_t aCapacity) |
141 | 15.5M | { |
142 | 15.5M | return aCapacity - (aCapacity >> 2); // == aCapacity * 0.75 |
143 | 15.5M | } |
144 | | static inline uint32_t |
145 | | MaxLoadOnGrowthFailure(uint32_t aCapacity) |
146 | 0 | { |
147 | 0 | return aCapacity - (aCapacity >> 5); // == aCapacity * 0.96875 |
148 | 0 | } |
149 | | static inline uint32_t |
150 | | MinLoad(uint32_t aCapacity) |
151 | 1.55M | { |
152 | 1.55M | return aCapacity >> 2; // == aCapacity * 0.25 |
153 | 1.55M | } |
154 | | |
155 | | // Compute the minimum capacity (and the Log2 of that capacity) for a table |
156 | | // containing |aLength| elements while respecting the following contraints: |
157 | | // - table must be at most 75% full; |
158 | | // - capacity must be a power of two; |
159 | | // - capacity cannot be too small. |
160 | | static inline void |
161 | | BestCapacity(uint32_t aLength, uint32_t* aCapacityOut, |
162 | | uint32_t* aLog2CapacityOut) |
163 | 1.76k | { |
164 | 1.76k | // Compute the smallest capacity allowing |aLength| elements to be inserted |
165 | 1.76k | // without rehashing. |
166 | 1.76k | uint32_t capacity = (aLength * 4 + (3 - 1)) / 3; // == ceil(aLength * 4 / 3) |
167 | 1.76k | if (capacity < PLDHashTable::kMinCapacity) { |
168 | 1.20k | capacity = PLDHashTable::kMinCapacity; |
169 | 1.20k | } |
170 | 1.76k | |
171 | 1.76k | // Round up capacity to next power-of-two. |
172 | 1.76k | uint32_t log2 = CeilingLog2(capacity); |
173 | 1.76k | capacity = 1u << log2; |
174 | 1.76k | MOZ_ASSERT(capacity <= PLDHashTable::kMaxCapacity); |
175 | 1.76k | |
176 | 1.76k | *aCapacityOut = capacity; |
177 | 1.76k | *aLog2CapacityOut = log2; |
178 | 1.76k | } |
179 | | |
180 | | /* static */ MOZ_ALWAYS_INLINE uint32_t |
181 | | PLDHashTable::HashShift(uint32_t aEntrySize, uint32_t aLength) |
182 | 1.66k | { |
183 | 1.66k | if (aLength > kMaxInitialLength) { |
184 | 0 | MOZ_CRASH("Initial length is too large"); |
185 | 0 | } |
186 | 1.66k | |
187 | 1.66k | uint32_t capacity, log2; |
188 | 1.66k | BestCapacity(aLength, &capacity, &log2); |
189 | 1.66k | |
190 | 1.66k | uint32_t nbytes; |
191 | 1.66k | if (!SizeOfEntryStore(capacity, aEntrySize, &nbytes)) { |
192 | 0 | MOZ_CRASH("Initial entry store size is too large"); |
193 | 0 | } |
194 | 1.66k | |
195 | 1.66k | // Compute the hashShift value. |
196 | 1.66k | return kPLDHashNumberBits - log2; |
197 | 1.66k | } |
198 | | |
199 | | PLDHashTable::PLDHashTable(const PLDHashTableOps* aOps, uint32_t aEntrySize, |
200 | | uint32_t aLength) |
201 | | : mOps(recordreplay::GeneratePLDHashTableCallbacks(aOps)) |
202 | | , mEntryStore() |
203 | | , mGeneration(0) |
204 | | , mHashShift(HashShift(aEntrySize, aLength)) |
205 | | , mEntrySize(aEntrySize) |
206 | | , mEntryCount(0) |
207 | | , mRemovedCount(0) |
208 | | #ifdef DEBUG |
209 | | , mChecker() |
210 | | #endif |
211 | 1.66k | { |
212 | 1.66k | // An entry size greater than 0xff is unlikely, but let's check anyway. If |
213 | 1.66k | // you hit this, your hashtable would waste lots of space for unused entries |
214 | 1.66k | // and you should change your hash table's entries to pointers. |
215 | 1.66k | if (aEntrySize != uint32_t(mEntrySize)) { |
216 | 0 | MOZ_CRASH("Entry size is too large"); |
217 | 0 | } |
218 | 1.66k | } |
219 | | |
220 | | PLDHashTable& |
221 | | PLDHashTable::operator=(PLDHashTable&& aOther) |
222 | 0 | { |
223 | 0 | if (this == &aOther) { |
224 | 0 | return *this; |
225 | 0 | } |
226 | 0 | |
227 | 0 | // |mOps| and |mEntrySize| are required to stay the same, they're |
228 | 0 | // conceptually part of the type -- indeed, if PLDHashTable was a templated |
229 | 0 | // type like nsTHashtable, they *would* be part of the type -- so it only |
230 | 0 | // makes sense to assign in cases where they match. An exception is when we |
231 | 0 | // are recording or replaying the execution, in which case custom ops are |
232 | 0 | // generated for each table. |
233 | 0 | MOZ_RELEASE_ASSERT(mOps == aOther.mOps || !mOps || recordreplay::IsRecordingOrReplaying()); |
234 | 0 | MOZ_RELEASE_ASSERT(mEntrySize == aOther.mEntrySize || !mEntrySize); |
235 | 0 |
|
236 | 0 | // Reconstruct |this|. |
237 | 0 | const PLDHashTableOps* ops = recordreplay::UnwrapPLDHashTableCallbacks(aOther.mOps); |
238 | 0 | this->~PLDHashTable(); |
239 | 0 | new (KnownNotNull, this) PLDHashTable(ops, aOther.mEntrySize, 0); |
240 | 0 |
|
241 | 0 | // Move non-const pieces over. |
242 | 0 | mHashShift = std::move(aOther.mHashShift); |
243 | 0 | mEntryCount = std::move(aOther.mEntryCount); |
244 | 0 | mRemovedCount = std::move(aOther.mRemovedCount); |
245 | 0 | mEntryStore.Set(aOther.mEntryStore.Get(), &mGeneration); |
246 | | #ifdef DEBUG |
247 | | mChecker = std::move(aOther.mChecker); |
248 | | #endif |
249 | |
|
250 | 0 | recordreplay::MovePLDHashTableContents(aOther.mOps, mOps); |
251 | 0 |
|
252 | 0 | // Clear up |aOther| so its destruction will be a no-op and it reports being |
253 | 0 | // empty. |
254 | 0 | { |
255 | | #ifdef DEBUG |
256 | | AutoDestructorOp op(mChecker); |
257 | | #endif |
258 | | aOther.mEntryCount = 0; |
259 | 0 | aOther.mEntryStore.Set(nullptr, &aOther.mGeneration); |
260 | 0 | } |
261 | 0 |
|
262 | 0 | return *this; |
263 | 0 | } |
264 | | |
265 | | PLDHashNumber |
266 | | PLDHashTable::Hash1(PLDHashNumber aHash0) const |
267 | 57.3M | { |
268 | 57.3M | return aHash0 >> mHashShift; |
269 | 57.3M | } |
270 | | |
271 | | void |
272 | | PLDHashTable::Hash2(PLDHashNumber aHash0, |
273 | | uint32_t& aHash2Out, uint32_t& aSizeMaskOut) const |
274 | 22.1M | { |
275 | 22.1M | uint32_t sizeLog2 = kPLDHashNumberBits - mHashShift; |
276 | 22.1M | uint32_t sizeMask = (PLDHashNumber(1) << sizeLog2) - 1; |
277 | 22.1M | aSizeMaskOut = sizeMask; |
278 | 22.1M | |
279 | 22.1M | // The incoming aHash0 always has the low bit unset (since we leave it |
280 | 22.1M | // free for the collision flag), and should have reasonably random |
281 | 22.1M | // data in the other 31 bits. We used the high bits of aHash0 for |
282 | 22.1M | // Hash1, so we use the low bits here. If the table size is large, |
283 | 22.1M | // the bits we use may overlap, but that's still more random than |
284 | 22.1M | // filling with 0s. |
285 | 22.1M | // |
286 | 22.1M | // Double hashing needs the second hash code to be relatively prime to table |
287 | 22.1M | // size, so we simply make hash2 odd. |
288 | 22.1M | // |
289 | 22.1M | // This also conveniently covers up the fact that we have the low bit |
290 | 22.1M | // unset since aHash0 has the low bit unset. |
291 | 22.1M | aHash2Out = (aHash0 & sizeMask) | 1; |
292 | 22.1M | } |
293 | | |
294 | | // Reserve mKeyHash 0 for free entries and 1 for removed-entry sentinels. Note |
295 | | // that a removed-entry sentinel need be stored only if the removed entry had |
296 | | // a colliding entry added after it. Therefore we can use 1 as the collision |
297 | | // flag in addition to the removed-entry sentinel value. Multiplicative hash |
298 | | // uses the high order bits of mKeyHash, so this least-significant reservation |
299 | | // should not hurt the hash function's effectiveness much. |
300 | | |
301 | | // Match an entry's mKeyHash against an unstored one computed from a key. |
302 | | /* static */ bool |
303 | | PLDHashTable::MatchEntryKeyhash(const PLDHashEntryHdr* aEntry, |
304 | | const PLDHashNumber aKeyHash) |
305 | 70.5M | { |
306 | 70.5M | return (aEntry->mKeyHash & ~kCollisionFlag) == aKeyHash; |
307 | 70.5M | } |
308 | | |
309 | | // Compute the address of the indexed entry in table. |
310 | | PLDHashEntryHdr* |
311 | | PLDHashTable::AddressEntry(uint32_t aIndex) const |
312 | 97.8M | { |
313 | 97.8M | return const_cast<PLDHashEntryHdr*>( |
314 | 97.8M | reinterpret_cast<const PLDHashEntryHdr*>( |
315 | 97.8M | mEntryStore.Get() + aIndex * mEntrySize)); |
316 | 97.8M | } |
317 | | |
318 | | PLDHashTable::~PLDHashTable() |
319 | 910 | { |
320 | | #ifdef DEBUG |
321 | | AutoDestructorOp op(mChecker); |
322 | | #endif |
323 | | |
324 | 910 | if (!mEntryStore.Get()) { |
325 | 907 | recordreplay::DestroyPLDHashTableCallbacks(mOps); |
326 | 907 | return; |
327 | 907 | } |
328 | 3 | |
329 | 3 | // Clear any remaining live entries. |
330 | 3 | char* entryAddr = mEntryStore.Get(); |
331 | 3 | char* entryLimit = entryAddr + Capacity() * mEntrySize; |
332 | 99 | while (entryAddr < entryLimit) { |
333 | 96 | PLDHashEntryHdr* entry = (PLDHashEntryHdr*)entryAddr; |
334 | 96 | if (EntryIsLive(entry)) { |
335 | 46 | mOps->clearEntry(this, entry); |
336 | 46 | } |
337 | 96 | entryAddr += mEntrySize; |
338 | 96 | } |
339 | 3 | |
340 | 3 | recordreplay::DestroyPLDHashTableCallbacks(mOps); |
341 | 3 | |
342 | 3 | // Entry storage is freed last, by ~EntryStore(). |
343 | 3 | } |
344 | | |
345 | | void |
346 | | PLDHashTable::ClearAndPrepareForLength(uint32_t aLength) |
347 | 24 | { |
348 | 24 | // Get these values before the destructor clobbers them. |
349 | 24 | const PLDHashTableOps* ops = recordreplay::UnwrapPLDHashTableCallbacks(mOps); |
350 | 24 | uint32_t entrySize = mEntrySize; |
351 | 24 | |
352 | 24 | this->~PLDHashTable(); |
353 | 24 | new (KnownNotNull, this) PLDHashTable(ops, entrySize, aLength); |
354 | 24 | } |
355 | | |
356 | | void |
357 | | PLDHashTable::Clear() |
358 | 24 | { |
359 | 24 | ClearAndPrepareForLength(kDefaultInitialLength); |
360 | 24 | } |
361 | | |
362 | | // If |Reason| is |ForAdd|, the return value is always non-null and it may be |
363 | | // a previously-removed entry. If |Reason| is |ForSearchOrRemove|, the return |
364 | | // value is null on a miss, and will never be a previously-removed entry on a |
365 | | // hit. This distinction is a bit grotty but this function is hot enough that |
366 | | // these differences are worthwhile. (It's also hot enough that |
367 | | // MOZ_ALWAYS_INLINE makes a significant difference.) |
368 | | template <PLDHashTable::SearchReason Reason> |
369 | | MOZ_ALWAYS_INLINE PLDHashEntryHdr* |
370 | | PLDHashTable::SearchTable(const void* aKey, PLDHashNumber aKeyHash) const |
371 | 47.5M | { |
372 | 47.5M | MOZ_ASSERT(mEntryStore.Get()); |
373 | 47.5M | NS_ASSERTION(!(aKeyHash & kCollisionFlag), |
374 | 47.5M | "!(aKeyHash & kCollisionFlag)"); |
375 | 47.5M | |
376 | 47.5M | // Compute the primary hash address. |
377 | 47.5M | PLDHashNumber hash1 = Hash1(aKeyHash); |
378 | 47.5M | PLDHashEntryHdr* entry = AddressEntry(hash1); |
379 | 47.5M | |
380 | 47.5M | // Miss: return space for a new entry. |
381 | 47.5M | if (EntryIsFree(entry)) { |
382 | 6.75M | return (Reason == ForAdd) ? entry : nullptr; |
383 | 6.75M | } |
384 | 40.8M | |
385 | 40.8M | // Hit: return entry. |
386 | 40.8M | PLDHashMatchEntry matchEntry = mOps->matchEntry; |
387 | 40.8M | if (MatchEntryKeyhash(entry, aKeyHash) && |
388 | 40.8M | matchEntry(entry, aKey)) { |
389 | 20.6M | return entry; |
390 | 20.6M | } |
391 | 20.2M | |
392 | 20.2M | // Collision: double hash. |
393 | 20.2M | PLDHashNumber hash2; |
394 | 20.2M | uint32_t sizeMask; |
395 | 20.2M | Hash2(aKeyHash, hash2, sizeMask); |
396 | 20.2M | |
397 | 20.2M | // Save the first removed entry pointer so Add() can recycle it. (Only used |
398 | 20.2M | // if Reason==ForAdd.) |
399 | 20.2M | PLDHashEntryHdr* firstRemoved = nullptr; |
400 | 20.2M | |
401 | 37.8M | for (;;) { |
402 | 37.8M | if (Reason == ForAdd && !firstRemoved) { |
403 | 10.8M | if (MOZ_UNLIKELY(EntryIsRemoved(entry))) { |
404 | 17.8k | firstRemoved = entry; |
405 | 10.7M | } else { |
406 | 10.7M | entry->mKeyHash |= kCollisionFlag; |
407 | 10.7M | } |
408 | 10.8M | } |
409 | 37.8M | |
410 | 37.8M | hash1 -= hash2; |
411 | 37.8M | hash1 &= sizeMask; |
412 | 37.8M | |
413 | 37.8M | entry = AddressEntry(hash1); |
414 | 37.8M | if (EntryIsFree(entry)) { |
415 | 8.10M | return (Reason == ForAdd) ? (firstRemoved ? firstRemoved : entry) |
416 | 8.10M | : nullptr; |
417 | 8.10M | } |
418 | 29.7M | |
419 | 29.7M | if (MatchEntryKeyhash(entry, aKeyHash) && |
420 | 29.7M | matchEntry(entry, aKey)) { |
421 | 12.1M | return entry; |
422 | 12.1M | } |
423 | 29.7M | } |
424 | 20.2M | |
425 | 20.2M | // NOTREACHED |
426 | 18.4E | return nullptr; |
427 | 20.2M | } PLDHashEntryHdr* PLDHashTable::SearchTable<(PLDHashTable::SearchReason)0>(void const*, unsigned int) const Line | Count | Source | 371 | 31.9M | { | 372 | 31.9M | MOZ_ASSERT(mEntryStore.Get()); | 373 | 31.9M | NS_ASSERTION(!(aKeyHash & kCollisionFlag), | 374 | 31.9M | "!(aKeyHash & kCollisionFlag)"); | 375 | 31.9M | | 376 | 31.9M | // Compute the primary hash address. | 377 | 31.9M | PLDHashNumber hash1 = Hash1(aKeyHash); | 378 | 31.9M | PLDHashEntryHdr* entry = AddressEntry(hash1); | 379 | 31.9M | | 380 | 31.9M | // Miss: return space for a new entry. | 381 | 31.9M | if (EntryIsFree(entry)) { | 382 | 3.08M | return (Reason == ForAdd) ? entry : nullptr; | 383 | 3.08M | } | 384 | 28.8M | | 385 | 28.8M | // Hit: return entry. | 386 | 28.8M | PLDHashMatchEntry matchEntry = mOps->matchEntry; | 387 | 28.8M | if (MatchEntryKeyhash(entry, aKeyHash) && | 388 | 28.8M | matchEntry(entry, aKey)) { | 389 | 13.0M | return entry; | 390 | 13.0M | } | 391 | 15.8M | | 392 | 15.8M | // Collision: double hash. | 393 | 15.8M | PLDHashNumber hash2; | 394 | 15.8M | uint32_t sizeMask; | 395 | 15.8M | Hash2(aKeyHash, hash2, sizeMask); | 396 | 15.8M | | 397 | 15.8M | // Save the first removed entry pointer so Add() can recycle it. (Only used | 398 | 15.8M | // if Reason==ForAdd.) | 399 | 15.8M | PLDHashEntryHdr* firstRemoved = nullptr; | 400 | 15.8M | | 401 | 26.9M | for (;;) { | 402 | 26.9M | if (Reason == ForAdd && !firstRemoved) { | 403 | 0 | if (MOZ_UNLIKELY(EntryIsRemoved(entry))) { | 404 | 0 | firstRemoved = entry; | 405 | 0 | } else { | 406 | 0 | entry->mKeyHash |= kCollisionFlag; | 407 | 0 | } | 408 | 0 | } | 409 | 26.9M | | 410 | 26.9M | hash1 -= hash2; | 411 | 26.9M | hash1 &= sizeMask; | 412 | 26.9M | | 413 | 26.9M | entry = AddressEntry(hash1); | 414 | 26.9M | if (EntryIsFree(entry)) { | 415 | 3.70M | return (Reason == ForAdd) ? (firstRemoved ? 
firstRemoved : entry) | 416 | 3.70M | : nullptr; | 417 | 3.70M | } | 418 | 23.2M | | 419 | 23.2M | if (MatchEntryKeyhash(entry, aKeyHash) && | 420 | 23.2M | matchEntry(entry, aKey)) { | 421 | 12.1M | return entry; | 422 | 12.1M | } | 423 | 23.2M | } | 424 | 15.8M | | 425 | 15.8M | // NOTREACHED | 426 | 18.4E | return nullptr; | 427 | 15.8M | } |
PLDHashEntryHdr* PLDHashTable::SearchTable<(PLDHashTable::SearchReason)1>(void const*, unsigned int) const Line | Count | Source | 371 | 15.5M | { | 372 | 15.5M | MOZ_ASSERT(mEntryStore.Get()); | 373 | 15.5M | NS_ASSERTION(!(aKeyHash & kCollisionFlag), | 374 | 15.5M | "!(aKeyHash & kCollisionFlag)"); | 375 | 15.5M | | 376 | 15.5M | // Compute the primary hash address. | 377 | 15.5M | PLDHashNumber hash1 = Hash1(aKeyHash); | 378 | 15.5M | PLDHashEntryHdr* entry = AddressEntry(hash1); | 379 | 15.5M | | 380 | 15.5M | // Miss: return space for a new entry. | 381 | 15.5M | if (EntryIsFree(entry)) { | 382 | 3.66M | return (Reason == ForAdd) ? entry : nullptr; | 383 | 3.66M | } | 384 | 11.9M | | 385 | 11.9M | // Hit: return entry. | 386 | 11.9M | PLDHashMatchEntry matchEntry = mOps->matchEntry; | 387 | 11.9M | if (MatchEntryKeyhash(entry, aKeyHash) && | 388 | 11.9M | matchEntry(entry, aKey)) { | 389 | 7.51M | return entry; | 390 | 7.51M | } | 391 | 4.40M | | 392 | 4.40M | // Collision: double hash. | 393 | 4.40M | PLDHashNumber hash2; | 394 | 4.40M | uint32_t sizeMask; | 395 | 4.40M | Hash2(aKeyHash, hash2, sizeMask); | 396 | 4.40M | | 397 | 4.40M | // Save the first removed entry pointer so Add() can recycle it. (Only used | 398 | 4.40M | // if Reason==ForAdd.) | 399 | 4.40M | PLDHashEntryHdr* firstRemoved = nullptr; | 400 | 4.40M | | 401 | 10.8M | for (;;) { | 402 | 10.8M | if (Reason == ForAdd && !firstRemoved) { | 403 | 10.8M | if (MOZ_UNLIKELY(EntryIsRemoved(entry))) { | 404 | 17.8k | firstRemoved = entry; | 405 | 10.7M | } else { | 406 | 10.7M | entry->mKeyHash |= kCollisionFlag; | 407 | 10.7M | } | 408 | 10.8M | } | 409 | 10.8M | | 410 | 10.8M | hash1 -= hash2; | 411 | 10.8M | hash1 &= sizeMask; | 412 | 10.8M | | 413 | 10.8M | entry = AddressEntry(hash1); | 414 | 10.8M | if (EntryIsFree(entry)) { | 415 | 4.40M | return (Reason == ForAdd) ? (firstRemoved ? 
firstRemoved : entry) | 416 | 4.40M | : nullptr; | 417 | 4.40M | } | 418 | 6.42M | | 419 | 6.42M | if (MatchEntryKeyhash(entry, aKeyHash) && | 420 | 6.42M | matchEntry(entry, aKey)) { | 421 | 331 | return entry; | 422 | 331 | } | 423 | 6.42M | } | 424 | 4.40M | | 425 | 4.40M | // NOTREACHED | 426 | 4.40M | return nullptr; | 427 | 4.40M | } |
|
428 | | |
429 | | // This is a copy of SearchTable(), used by ChangeTable(), hardcoded to |
430 | | // 1. assume |Reason| is |ForAdd|, |
431 | | // 2. assume that |aKey| will never match an existing entry, and |
432 | | // 3. assume that no entries have been removed from the current table |
433 | | // structure. |
434 | | // Avoiding the need for |aKey| means we can avoid needing a way to map entries |
435 | | // to keys, which means callers can use complex key types more easily. |
436 | | MOZ_ALWAYS_INLINE PLDHashEntryHdr* |
437 | | PLDHashTable::FindFreeEntry(PLDHashNumber aKeyHash) const |
438 | 9.77M | { |
439 | 9.77M | MOZ_ASSERT(mEntryStore.Get()); |
440 | 9.77M | NS_ASSERTION(!(aKeyHash & kCollisionFlag), |
441 | 9.77M | "!(aKeyHash & kCollisionFlag)"); |
442 | 9.77M | |
443 | 9.77M | // Compute the primary hash address. |
444 | 9.77M | PLDHashNumber hash1 = Hash1(aKeyHash); |
445 | 9.77M | PLDHashEntryHdr* entry = AddressEntry(hash1); |
446 | 9.77M | |
447 | 9.77M | // Miss: return space for a new entry. |
448 | 9.77M | if (EntryIsFree(entry)) { |
449 | 7.86M | return entry; |
450 | 7.86M | } |
451 | 1.90M | |
452 | 1.90M | // Collision: double hash. |
453 | 1.90M | PLDHashNumber hash2; |
454 | 1.90M | uint32_t sizeMask; |
455 | 1.90M | Hash2(aKeyHash, hash2, sizeMask); |
456 | 1.90M | |
457 | 2.65M | for (;;) { |
458 | 2.65M | NS_ASSERTION(!EntryIsRemoved(entry), |
459 | 2.65M | "!EntryIsRemoved(entry)"); |
460 | 2.65M | entry->mKeyHash |= kCollisionFlag; |
461 | 2.65M | |
462 | 2.65M | hash1 -= hash2; |
463 | 2.65M | hash1 &= sizeMask; |
464 | 2.65M | |
465 | 2.65M | entry = AddressEntry(hash1); |
466 | 2.65M | if (EntryIsFree(entry)) { |
467 | 1.90M | return entry; |
468 | 1.90M | } |
469 | 2.65M | } |
470 | 1.90M | |
471 | 1.90M | // NOTREACHED |
472 | 1.90M | } |
473 | | |
474 | | bool |
475 | | PLDHashTable::ChangeTable(int32_t aDeltaLog2) |
476 | 705 | { |
477 | 705 | MOZ_ASSERT(mEntryStore.Get()); |
478 | 705 | |
479 | 705 | // Look, but don't touch, until we succeed in getting new entry store. |
480 | 705 | int32_t oldLog2 = kPLDHashNumberBits - mHashShift; |
481 | 705 | int32_t newLog2 = oldLog2 + aDeltaLog2; |
482 | 705 | uint32_t newCapacity = 1u << newLog2; |
483 | 705 | if (newCapacity > kMaxCapacity) { |
484 | 0 | return false; |
485 | 0 | } |
486 | 705 | |
487 | 705 | uint32_t nbytes; |
488 | 705 | if (!SizeOfEntryStore(newCapacity, mEntrySize, &nbytes)) { |
489 | 0 | return false; // overflowed |
490 | 0 | } |
491 | 705 | |
492 | 705 | char* newEntryStore = (char*)calloc(1, nbytes); |
493 | 705 | if (!newEntryStore) { |
494 | 0 | return false; |
495 | 0 | } |
496 | 705 | |
497 | 705 | // We can't fail from here on, so update table parameters. |
498 | 705 | mHashShift = kPLDHashNumberBits - newLog2; |
499 | 705 | mRemovedCount = 0; |
500 | 705 | |
501 | 705 | // Assign the new entry store to table. |
502 | 705 | char* oldEntryStore; |
503 | 705 | char* oldEntryAddr; |
504 | 705 | oldEntryAddr = oldEntryStore = mEntryStore.Get(); |
505 | 705 | mEntryStore.Set(newEntryStore, &mGeneration); |
506 | 705 | PLDHashMoveEntry moveEntry = mOps->moveEntry; |
507 | 705 | |
508 | 705 | // Copy only live entries, leaving removed ones behind. |
509 | 705 | uint32_t oldCapacity = 1u << oldLog2; |
510 | 24.6M | for (uint32_t i = 0; i < oldCapacity; ++i) { |
511 | 24.6M | PLDHashEntryHdr* oldEntry = (PLDHashEntryHdr*)oldEntryAddr; |
512 | 24.6M | if (EntryIsLive(oldEntry)) { |
513 | 9.77M | const PLDHashNumber key = oldEntry->mKeyHash & ~kCollisionFlag; |
514 | 9.77M | PLDHashEntryHdr* newEntry = FindFreeEntry(key); |
515 | 9.77M | NS_ASSERTION(EntryIsFree(newEntry), "EntryIsFree(newEntry)"); |
516 | 9.77M | moveEntry(this, oldEntry, newEntry); |
517 | 9.77M | newEntry->mKeyHash = key; |
518 | 9.77M | } |
519 | 24.6M | oldEntryAddr += mEntrySize; |
520 | 24.6M | } |
521 | 705 | |
522 | 705 | free(oldEntryStore); |
523 | 705 | return true; |
524 | 705 | } |
525 | | |
526 | | MOZ_ALWAYS_INLINE PLDHashNumber |
527 | | PLDHashTable::ComputeKeyHash(const void* aKey) const |
528 | 47.5M | { |
529 | 47.5M | MOZ_ASSERT(mEntryStore.Get()); |
530 | 47.5M | |
531 | 47.5M | PLDHashNumber keyHash = mozilla::ScrambleHashCode(mOps->hashKey(aKey)); |
532 | 47.5M | |
533 | 47.5M | // Avoid 0 and 1 hash codes, they indicate free and removed entries. |
534 | 47.5M | if (keyHash < 2) { |
535 | 4.87M | keyHash -= 2; |
536 | 4.87M | } |
537 | 47.5M | keyHash &= ~kCollisionFlag; |
538 | 47.5M | |
539 | 47.5M | return keyHash; |
540 | 47.5M | } |
541 | | |
542 | | PLDHashEntryHdr* |
543 | | PLDHashTable::Search(const void* aKey) const |
544 | 30.4M | { |
545 | | #ifdef DEBUG |
546 | | AutoReadOp op(mChecker); |
547 | | #endif |
548 | | |
549 | 30.4M | PLDHashEntryHdr* entry = mEntryStore.Get() |
550 | 30.4M | ? SearchTable<ForSearchOrRemove>(aKey, |
551 | 30.4M | ComputeKeyHash(aKey)) |
552 | 30.4M | : nullptr; |
553 | 30.4M | return entry; |
554 | 30.4M | } |
555 | | |
556 | | PLDHashEntryHdr* |
557 | | PLDHashTable::Add(const void* aKey, const mozilla::fallible_t&) |
558 | 15.5M | { |
559 | | #ifdef DEBUG |
560 | | AutoWriteOp op(mChecker); |
561 | | #endif |
562 | | |
563 | 15.5M | // Allocate the entry storage if it hasn't already been allocated. |
564 | 15.5M | if (!mEntryStore.Get()) { |
565 | 603 | uint32_t nbytes; |
566 | 603 | // We already checked this in the constructor, so it must still be true. |
567 | 603 | MOZ_RELEASE_ASSERT(SizeOfEntryStore(CapacityFromHashShift(), mEntrySize, |
568 | 603 | &nbytes)); |
569 | 603 | mEntryStore.Set((char*)calloc(1, nbytes), &mGeneration); |
570 | 603 | if (!mEntryStore.Get()) { |
571 | 0 | return nullptr; |
572 | 0 | } |
573 | 15.5M | } |
574 | 15.5M | |
575 | 15.5M | // If alpha is >= .75, grow or compress the table. If aKey is already in the |
576 | 15.5M | // table, we may grow once more than necessary, but only if we are on the |
577 | 15.5M | // edge of being overloaded. |
578 | 15.5M | uint32_t capacity = Capacity(); |
579 | 15.5M | if (mEntryCount + mRemovedCount >= MaxLoad(capacity)) { |
580 | 605 | // Compress if a quarter or more of all entries are removed. |
581 | 605 | int deltaLog2; |
582 | 605 | if (mRemovedCount >= capacity >> 2) { |
583 | 0 | deltaLog2 = 0; |
584 | 605 | } else { |
585 | 605 | deltaLog2 = 1; |
586 | 605 | } |
587 | 605 | |
588 | 605 | // Grow or compress the table. If ChangeTable() fails, allow overloading up |
589 | 605 | // to the secondary max. Once we hit the secondary max, return null. |
590 | 605 | if (!ChangeTable(deltaLog2) && |
591 | 605 | mEntryCount + mRemovedCount >= MaxLoadOnGrowthFailure(capacity)) { |
592 | 0 | return nullptr; |
593 | 0 | } |
594 | 15.5M | } |
595 | 15.5M | |
596 | 15.5M | // Look for entry after possibly growing, so we don't have to add it, |
597 | 15.5M | // then skip it while growing the table and re-add it after. |
598 | 15.5M | PLDHashNumber keyHash = ComputeKeyHash(aKey); |
599 | 15.5M | PLDHashEntryHdr* entry = SearchTable<ForAdd>(aKey, keyHash); |
600 | 15.5M | if (!EntryIsLive(entry)) { |
601 | 8.07M | // Initialize the entry, indicating that it's no longer free. |
602 | 8.07M | if (EntryIsRemoved(entry)) { |
603 | 17.8k | mRemovedCount--; |
604 | 17.8k | keyHash |= kCollisionFlag; |
605 | 17.8k | } |
606 | 8.07M | if (mOps->initEntry) { |
607 | 16.0k | mOps->initEntry(entry, aKey); |
608 | 16.0k | } |
609 | 8.07M | entry->mKeyHash = keyHash; |
610 | 8.07M | mEntryCount++; |
611 | 8.07M | } |
612 | 15.5M | |
613 | 15.5M | return entry; |
614 | 15.5M | } |
615 | | |
616 | | PLDHashEntryHdr* |
617 | | PLDHashTable::Add(const void* aKey) |
618 | 7.52M | { |
619 | 7.52M | PLDHashEntryHdr* entry = Add(aKey, fallible); |
620 | 7.52M | if (!entry) { |
621 | 0 | if (!mEntryStore.Get()) { |
622 | 0 | // We OOM'd while allocating the initial entry storage. |
623 | 0 | uint32_t nbytes; |
624 | 0 | (void) SizeOfEntryStore(CapacityFromHashShift(), mEntrySize, &nbytes); |
625 | 0 | NS_ABORT_OOM(nbytes); |
626 | 0 | } else { |
627 | 0 | // We failed to resize the existing entry storage, either due to OOM or |
628 | 0 | // because we exceeded the maximum table capacity or size; report it as |
629 | 0 | // an OOM. The multiplication by 2 gets us the size we tried to allocate, |
630 | 0 | // which is double the current size. |
631 | 0 | NS_ABORT_OOM(2 * EntrySize() * EntryCount()); |
632 | 0 | } |
633 | 0 | } |
634 | 7.52M | return entry; |
635 | 7.52M | } |
636 | | |
637 | | void |
638 | | PLDHashTable::Remove(const void* aKey) |
639 | 1.55M | { |
640 | | #ifdef DEBUG |
641 | | AutoWriteOp op(mChecker); |
642 | | #endif |
643 | | |
644 | 1.55M | PLDHashEntryHdr* entry = mEntryStore.Get() |
645 | 1.55M | ? SearchTable<ForSearchOrRemove>(aKey, |
646 | 1.55M | ComputeKeyHash(aKey)) |
647 | 1.55M | : nullptr; |
648 | 1.55M | if (entry) { |
649 | 1.55M | RawRemove(entry); |
650 | 1.55M | ShrinkIfAppropriate(); |
651 | 1.55M | } |
652 | 1.55M | } |
653 | | |
654 | | void |
655 | | PLDHashTable::RemoveEntry(PLDHashEntryHdr* aEntry) |
656 | 4 | { |
657 | | #ifdef DEBUG |
658 | | AutoWriteOp op(mChecker); |
659 | | #endif |
660 | | |
661 | 4 | RawRemove(aEntry); |
662 | 4 | ShrinkIfAppropriate(); |
663 | 4 | } |
664 | | |
665 | | void |
666 | | PLDHashTable::RawRemove(PLDHashEntryHdr* aEntry) |
667 | 7.77M | { |
668 | 7.77M | // Unfortunately, we can only do weak checking here. That's because |
669 | 7.77M | // RawRemove() can be called legitimately while an Enumerate() call is |
670 | 7.77M | // active, which doesn't fit well into how Checker's mState variable works. |
671 | 7.77M | MOZ_ASSERT(mChecker.IsWritable()); |
672 | 7.77M | |
673 | 7.77M | MOZ_ASSERT(mEntryStore.Get()); |
674 | 7.77M | |
675 | 7.77M | MOZ_ASSERT(EntryIsLive(aEntry), "EntryIsLive(aEntry)"); |
676 | 7.77M | |
677 | 7.77M | // Load keyHash first in case clearEntry() goofs it. |
678 | 7.77M | PLDHashNumber keyHash = aEntry->mKeyHash; |
679 | 7.77M | mOps->clearEntry(this, aEntry); |
680 | 7.77M | if (keyHash & kCollisionFlag) { |
681 | 3.74M | MarkEntryRemoved(aEntry); |
682 | 3.74M | mRemovedCount++; |
683 | 4.03M | } else { |
684 | 4.03M | MarkEntryFree(aEntry); |
685 | 4.03M | } |
686 | 7.77M | mEntryCount--; |
687 | 7.77M | } |
688 | | |
689 | | // Shrink or compress if a quarter or more of all entries are removed, or if the |
690 | | // table is underloaded according to the minimum alpha, and is not minimal-size |
691 | | // already. |
692 | | void |
693 | | PLDHashTable::ShrinkIfAppropriate() |
694 | 1.55M | { |
695 | 1.55M | uint32_t capacity = Capacity(); |
696 | 1.55M | if (mRemovedCount >= capacity >> 2 || |
697 | 1.55M | (capacity > kMinCapacity && mEntryCount <= MinLoad(capacity))) { |
698 | 100 | uint32_t log2; |
699 | 100 | BestCapacity(mEntryCount, &capacity, &log2); |
700 | 100 | |
701 | 100 | int32_t deltaLog2 = log2 - (kPLDHashNumberBits - mHashShift); |
702 | 100 | MOZ_ASSERT(deltaLog2 <= 0); |
703 | 100 | |
704 | 100 | (void) ChangeTable(deltaLog2); |
705 | 100 | } |
706 | 1.55M | } |
707 | | |
708 | | size_t |
709 | | PLDHashTable::ShallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const |
710 | 0 | { |
711 | | #ifdef DEBUG |
712 | | AutoReadOp op(mChecker); |
713 | | #endif |
714 | |
|
715 | 0 | return aMallocSizeOf(mEntryStore.Get()); |
716 | 0 | } |
717 | | |
718 | | size_t |
719 | | PLDHashTable::ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const |
720 | 0 | { |
721 | 0 | return aMallocSizeOf(this) + ShallowSizeOfExcludingThis(aMallocSizeOf); |
722 | 0 | } |
723 | | |
724 | | PLDHashTable::Iterator::Iterator(Iterator&& aOther) |
725 | | : mTable(aOther.mTable) |
726 | | , mStart(aOther.mStart) |
727 | | , mLimit(aOther.mLimit) |
728 | | , mCurrent(aOther.mCurrent) |
729 | | , mNexts(aOther.mNexts) |
730 | | , mNextsLimit(aOther.mNextsLimit) |
731 | | , mHaveRemoved(aOther.mHaveRemoved) |
732 | 0 | { |
733 | 0 | // No need to change |mChecker| here. |
734 | 0 | aOther.mTable = nullptr; |
735 | 0 | aOther.mStart = nullptr; |
736 | 0 | aOther.mLimit = nullptr; |
737 | 0 | aOther.mCurrent = nullptr; |
738 | 0 | aOther.mNexts = 0; |
739 | 0 | aOther.mNextsLimit = 0; |
740 | 0 | aOther.mHaveRemoved = false; |
741 | 0 | } |
742 | | |
743 | | PLDHashTable::Iterator::Iterator(PLDHashTable* aTable) |
744 | | : mTable(aTable) |
745 | | , mStart(mTable->mEntryStore.Get()) |
746 | | , mLimit(mTable->mEntryStore.Get() + mTable->Capacity() * mTable->mEntrySize) |
747 | | , mCurrent(mTable->mEntryStore.Get()) |
748 | | , mNexts(0) |
749 | | , mNextsLimit(mTable->EntryCount()) |
750 | | , mHaveRemoved(false) |
751 | 274 | { |
752 | | #ifdef DEBUG |
753 | | mTable->mChecker.StartReadOp(); |
754 | | #endif |
755 | | |
756 | 274 | if (ChaosMode::isActive(ChaosFeature::HashTableIteration) && |
757 | 274 | mTable->Capacity() > 0) { |
758 | 0 | // Start iterating at a random entry. It would be even more chaotic to |
759 | 0 | // iterate in fully random order, but that's harder. |
760 | 0 | mCurrent += ChaosMode::randomUint32LessThan(mTable->Capacity()) * |
761 | 0 | mTable->mEntrySize; |
762 | 0 | } |
763 | 274 | |
764 | 274 | // Advance to the first live entry, if there is one. |
765 | 274 | if (!Done()) { |
766 | 307 | while (IsOnNonLiveEntry()) { |
767 | 159 | MoveToNextEntry(); |
768 | 159 | } |
769 | 148 | } |
770 | 274 | } |
771 | | |
772 | | PLDHashTable::Iterator::~Iterator() |
773 | 274 | { |
774 | 274 | if (mTable) { |
775 | 274 | if (mHaveRemoved) { |
776 | 72 | mTable->ShrinkIfAppropriate(); |
777 | 72 | } |
778 | | #ifdef DEBUG |
779 | | mTable->mChecker.EndReadOp(); |
780 | | #endif |
781 | | } |
782 | 274 | } |
783 | | |
784 | | MOZ_ALWAYS_INLINE bool |
785 | | PLDHashTable::Iterator::IsOnNonLiveEntry() const |
786 | 14.7M | { |
787 | 14.7M | MOZ_ASSERT(!Done()); |
788 | 14.7M | return !EntryIsLive(reinterpret_cast<PLDHashEntryHdr*>(mCurrent)); |
789 | 14.7M | } |
790 | | |
791 | | MOZ_ALWAYS_INLINE void |
792 | | PLDHashTable::Iterator::MoveToNextEntry() |
793 | 14.7M | { |
794 | 14.7M | mCurrent += mTable->mEntrySize; |
795 | 14.7M | if (mCurrent == mLimit) { |
796 | 0 | mCurrent = mStart; // Wrap-around. Possible due to Chaos Mode. |
797 | 0 | } |
798 | 14.7M | } |
799 | | |
800 | | void |
801 | | PLDHashTable::Iterator::Next() |
802 | 9.85M | { |
803 | 9.85M | MOZ_ASSERT(!Done()); |
804 | 9.85M | |
805 | 9.85M | mNexts++; |
806 | 9.85M | |
807 | 9.85M | // Advance to the next live entry, if there is one. |
808 | 9.85M | if (!Done()) { |
809 | 14.7M | do { |
810 | 14.7M | MoveToNextEntry(); |
811 | 14.7M | } while (IsOnNonLiveEntry()); |
812 | 9.85M | } |
813 | 9.85M | } |
814 | | |
815 | | void |
816 | | PLDHashTable::Iterator::Remove() |
817 | 6.22M | { |
818 | 6.22M | // This cast is needed for the same reason as the one in the destructor. |
819 | 6.22M | mTable->RawRemove(Get()); |
820 | 6.22M | mHaveRemoved = true; |
821 | 6.22M | } |
822 | | |
#ifdef DEBUG
// Debug-only: mark the table's checker non-writable, so that subsequent
// mutating operations (which take AutoWriteOp / assert IsWritable()) trip
// debug assertions. Has no effect in non-DEBUG builds.
void
PLDHashTable::MarkImmutable()
{
  mChecker.SetNonWritable();
}
#endif