/work/obj-fuzz/dist/include/PLDHashTable.h
Line | Count | Source |
1 | | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim: set ts=8 sts=2 et sw=2 tw=80: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | // See the comment at the top of mfbt/HashTable.h for a comparison between |
8 | | // PLDHashTable and mozilla::HashTable. |
9 | | |
10 | | #ifndef PLDHashTable_h |
11 | | #define PLDHashTable_h |
12 | | |
13 | | #include "mozilla/Atomics.h" |
14 | | #include "mozilla/Attributes.h" // for MOZ_ALWAYS_INLINE |
15 | | #include "mozilla/fallible.h" |
16 | | #include "mozilla/HashFunctions.h" |
17 | | #include "mozilla/MemoryReporting.h" |
18 | | #include "mozilla/Move.h" |
19 | | #include "mozilla/Types.h" |
20 | | #include "nscore.h" |
21 | | |
22 | | using PLDHashNumber = mozilla::HashNumber; |
23 | | static const uint32_t kPLDHashNumberBits = mozilla::kHashNumberBits; |
24 | | |
25 | | class PLDHashTable; |
26 | | struct PLDHashTableOps; |
27 | | |
28 | | // Table entry header structure. |
29 | | // |
30 | | // In order to allow in-line allocation of key and value, we do not declare |
31 | | // either here. Instead, the API uses const void *key as a formal parameter. |
32 | | // The key need not be stored in the entry; it may be part of the value, but |
33 | | // need not be stored at all. |
34 | | // |
35 | | // Callback types are defined below and grouped into the PLDHashTableOps |
36 | | // structure, for single static initialization per hash table sub-type. |
37 | | // |
38 | | // Each hash table sub-type should make its entry type a subclass of |
39 | | // PLDHashEntryHdr. The mKeyHash member contains the result of suitably |
40 | | // scrambling the hash code returned from the hashKey callback (see below), |
41 | | // then constraining the result to avoid the magic 0 and 1 values. The stored |
42 | | // mKeyHash value is table size invariant, and it is maintained automatically |
43 | | // -- users need never access it. |
44 | | struct PLDHashEntryHdr |
45 | | { |
46 | | PLDHashEntryHdr() = default; |
47 | | PLDHashEntryHdr(const PLDHashEntryHdr&) = delete; |
48 | | PLDHashEntryHdr& operator=(const PLDHashEntryHdr&) = delete; |
49 | | PLDHashEntryHdr(PLDHashEntryHdr&&) = default; |
50 | | PLDHashEntryHdr& operator=(PLDHashEntryHdr&&) = default; |
51 | | |
52 | | private: |
53 | | friend class PLDHashTable; |
54 | | |
55 | | PLDHashNumber mKeyHash; |
56 | | }; |
57 | | |
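 | | // An illustrative sketch, not part of this header: a hypothetical entry |
 | | // sub-type with an in-line key and value. The names FooEntry, mKey and |
 | | // mValue are assumptions reused by later examples. |
 | | // |
 | | //   struct FooEntry : public PLDHashEntryHdr |
 | | //   { |
 | | //     const char* mKey;   // owned elsewhere; compared by the matchEntry hook |
 | | //     uint32_t mValue;    // arbitrary per-entry payload |
 | | //   }; |
 | | |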
58 | | #ifdef DEBUG |
59 | | |
60 | | // This class does three kinds of checking: |
61 | | // |
62 | | // - that calls to one of |mOps| or to an enumerator do not cause re-entry into |
63 | | // the table in an unsafe way; |
64 | | // |
65 | | // - that multiple threads do not access the table in an unsafe way; |
66 | | // |
67 | | // - that a table marked as immutable is not modified. |
68 | | // |
69 | | // "Safe" here means that multiple concurrent read operations are ok, but a |
70 | | // write operation (i.e. one that can cause the entry storage to be reallocated |
71 | | // or destroyed) cannot safely run concurrently with another read or write |
72 | | // operation. This meaning of "safe" is only partial; for example, it does not |
73 | | // cover whether a single entry in the table is modified by two separate |
74 | | // threads. (Doing such checking would be much harder.) |
75 | | // |
76 | | // It does this with two variables: |
77 | | // |
78 | | // - mState, which embodies a tri-state tagged union with the following |
79 | | // variants: |
80 | | // - Idle |
81 | | // - Read(n), where 'n' is the number of concurrent read operations |
82 | | // - Write |
83 | | // |
84 | | // - mIsWritable, which indicates if the table is mutable. |
85 | | // |
86 | | class Checker |
87 | | { |
88 | | public: |
89 | | constexpr Checker() : mState(kIdle), mIsWritable(1) {} |
90 | | |
91 | | Checker& operator=(Checker&& aOther) { |
92 | | // Atomic<> doesn't have an |operator=(Atomic<>&&)|. |
93 | | mState = uint32_t(aOther.mState); |
94 | | mIsWritable = uint32_t(aOther.mIsWritable); |
95 | | |
96 | | aOther.mState = kIdle; |
97 | | |
98 | | return *this; |
99 | | } |
100 | | |
101 | | static bool IsIdle(uint32_t aState) { return aState == kIdle; } |
102 | | static bool IsRead(uint32_t aState) { return kRead1 <= aState && |
103 | | aState <= kReadMax; } |
104 | | static bool IsRead1(uint32_t aState) { return aState == kRead1; } |
105 | | static bool IsWrite(uint32_t aState) { return aState == kWrite; } |
106 | | |
107 | | bool IsIdle() const { return mState == kIdle; } |
108 | | |
109 | | bool IsWritable() const { return !!mIsWritable; } |
110 | | |
111 | | void SetNonWritable() { mIsWritable = 0; } |
112 | | |
113 | | // NOTE: the obvious way to implement these functions is to (a) check |
114 | | // |mState| is reasonable, and then (b) update |mState|. But the lack of |
115 | | // atomicity in such an implementation can cause problems if we get unlucky |
116 | | // thread interleaving between (a) and (b). |
117 | | // |
118 | | // So instead for |mState| we are careful to (a) first get |mState|'s old |
119 | | // value and assign it a new value in a single atomic operation, and only then |
120 | | // (b) check the old value was reasonable. This ensures we don't have |
121 | | // interleaving problems. |
122 | | // |
123 | | // For |mIsWritable| we don't need to be as careful because it can only |
124 | | // transition in one direction (from writable to non-writable). |
125 | | |
126 | | void StartReadOp() |
127 | | { |
128 | | uint32_t oldState = mState++; // this is an atomic increment |
129 | | MOZ_ASSERT(IsIdle(oldState) || IsRead(oldState)); |
130 | | MOZ_ASSERT(oldState < kReadMax); // check for overflow |
131 | | } |
132 | | |
133 | | void EndReadOp() |
134 | | { |
135 | | uint32_t oldState = mState--; // this is an atomic decrement |
136 | | MOZ_ASSERT(IsRead(oldState)); |
137 | | } |
138 | | |
139 | | void StartWriteOp() |
140 | | { |
141 | | MOZ_ASSERT(IsWritable()); |
142 | | uint32_t oldState = mState.exchange(kWrite); |
143 | | MOZ_ASSERT(IsIdle(oldState)); |
144 | | } |
145 | | |
146 | | void EndWriteOp() |
147 | | { |
148 | | // Check again that the table is writable, in case it was marked as |
149 | | // non-writable just after the IsWritable() assertion in StartWriteOp() |
150 | | // occurred. |
151 | | MOZ_ASSERT(IsWritable()); |
152 | | uint32_t oldState = mState.exchange(kIdle); |
153 | | MOZ_ASSERT(IsWrite(oldState)); |
154 | | } |
155 | | |
156 | | void StartIteratorRemovalOp() |
157 | | { |
158 | | // When doing removals at the end of iteration, we go from Read1 state to |
159 | | // Write and then back. |
160 | | MOZ_ASSERT(IsWritable()); |
161 | | uint32_t oldState = mState.exchange(kWrite); |
162 | | MOZ_ASSERT(IsRead1(oldState)); |
163 | | } |
164 | | |
165 | | void EndIteratorRemovalOp() |
166 | | { |
167 | | // Check again that the table is writable, in case it was marked as |
168 | | // non-writable just after the IsWritable() assertion in |
169 | | // StartIteratorRemovalOp() occurred. |
170 | | MOZ_ASSERT(IsWritable()); |
171 | | uint32_t oldState = mState.exchange(kRead1); |
172 | | MOZ_ASSERT(IsWrite(oldState)); |
173 | | } |
174 | | |
175 | | void StartDestructorOp() |
176 | | { |
177 | | // A destructor op is like a write, but the table doesn't need to be |
178 | | // writable. |
179 | | uint32_t oldState = mState.exchange(kWrite); |
180 | | MOZ_ASSERT(IsIdle(oldState)); |
181 | | } |
182 | | |
183 | | void EndDestructorOp() |
184 | | { |
185 | | uint32_t oldState = mState.exchange(kIdle); |
186 | | MOZ_ASSERT(IsWrite(oldState)); |
187 | | } |
188 | | |
189 | | private: |
190 | | // Things of note about the representation of |mState|. |
191 | | // - The values between kRead1..kReadMax represent valid Read(n) values. |
192 | | // - kIdle and kRead1 are deliberately chosen so that incrementing the |
193 | | // former gives the latter. |
194 | | // - 9999 concurrent readers should be enough for anybody. |
195 | | static const uint32_t kIdle = 0; |
196 | | static const uint32_t kRead1 = 1; |
197 | | static const uint32_t kReadMax = 9999; |
198 | | static const uint32_t kWrite = 10000; |
199 | | |
200 | | mutable mozilla::Atomic<uint32_t, |
201 | | mozilla::SequentiallyConsistent, |
202 | | mozilla::recordreplay::Behavior::DontPreserve> mState; |
203 | | mutable mozilla::Atomic<uint32_t, |
204 | | mozilla::SequentiallyConsistent, |
205 | | mozilla::recordreplay::Behavior::DontPreserve> mIsWritable; |
206 | | }; |
207 | | #endif |
208 | | |
209 | | // A PLDHashTable may be allocated on the stack or within another structure or |
210 | | // class. No entry storage is allocated until the first element is added. This |
211 | | // means that empty hash tables are cheap, which is good because they are |
212 | | // common. |
213 | | // |
214 | | // There used to be a long, math-heavy comment here about the merits of |
215 | | // double hashing vs. chaining; it was removed in bug 1058335. In short, double |
216 | | // hashing is more space-efficient unless the element size gets large (in which |
217 | | // case you should keep using double hashing but switch to using pointer |
218 | | // elements). Also, with double hashing, you can't safely hold an entry pointer |
219 | | // and use it after an add or remove operation, unless you sample Generation() |
220 | | // before adding or removing, and compare the sample after, dereferencing the |
221 | | // entry pointer only if Generation() has not changed. |
222 | | class PLDHashTable |
223 | | { |
224 | | private: |
225 | | // This class maintains the invariant that every time the entry store is |
226 | | // changed, the generation is updated. |
227 | | // |
228 | | // Note: It would be natural to store the generation within this class, but |
229 | | // we can't do that without bloating sizeof(PLDHashTable) on 64-bit machines. |
230 | | // So instead we store it outside this class, and Set() takes a pointer to it |
231 | | // and ensures it is updated as necessary. |
232 | | class EntryStore |
233 | | { |
234 | | private: |
235 | | char* mEntryStore; |
236 | | |
237 | | public: |
238 | 1.66k | EntryStore() : mEntryStore(nullptr) {} |
239 | | |
240 | | ~EntryStore() |
241 | 910 | { |
242 | 910 | free(mEntryStore); |
243 | 910 | mEntryStore = nullptr; |
244 | 910 | } |
245 | | |
246 | 17.1M | char* Get() { return mEntryStore; } |
247 | 145M | const char* Get() const { return mEntryStore; } |
248 | | |
249 | | void Set(char* aEntryStore, uint16_t* aGeneration) |
250 | 1.30k | { |
251 | 1.30k | mEntryStore = aEntryStore; |
252 | 1.30k | *aGeneration += 1; |
253 | 1.30k | } |
254 | | }; |
255 | | |
256 | | // These fields are packed carefully. On 32-bit platforms, |
257 | | // sizeof(PLDHashTable) is 20. On 64-bit platforms, sizeof(PLDHashTable) is |
258 | | // 32; 28 bytes of data followed by 4 bytes of padding for alignment. |
259 | | const PLDHashTableOps* const mOps; // Virtual operations; see below. |
260 | | EntryStore mEntryStore; // (Lazy) entry storage and generation. |
261 | | uint16_t mGeneration; // The storage generation. |
262 | | uint8_t mHashShift; // Multiplicative hash shift. |
263 | | const uint8_t mEntrySize; // Number of bytes in an entry. |
264 | | uint32_t mEntryCount; // Number of entries in table. |
265 | | uint32_t mRemovedCount; // Removed entry sentinels in table. |
266 | | |
267 | | #ifdef DEBUG |
268 | | mutable Checker mChecker; |
269 | | #endif |
270 | | |
271 | | public: |
272 | | // Table capacity limit; do not exceed. The max capacity used to be 1<<23 but |
273 | | // occasionally that wasn't enough. Making it much bigger than 1<<26 |
274 | | // probably isn't worthwhile -- tables that big are kind of ridiculous. |
275 | | // Also, the growth operation will (deliberately) fail if |capacity * |
276 | | // mEntrySize| overflows a uint32_t, and mEntrySize is always at least 8 |
277 | | // bytes. |
278 | | static const uint32_t kMaxCapacity = ((uint32_t)1 << 26); |
279 | | |
280 | | static const uint32_t kMinCapacity = 8; |
281 | | |
282 | | // Making this half of kMaxCapacity ensures it'll fit. Nobody should need an |
283 | | // initial length anywhere near this large, anyway. |
284 | | static const uint32_t kMaxInitialLength = kMaxCapacity / 2; |
285 | | |
286 | | // This gives a default initial capacity of 8. |
287 | | static const uint32_t kDefaultInitialLength = 4; |
288 | | |
289 | | // Initialize the table with |aOps| and |aEntrySize|. The table's initial |
290 | | // capacity is chosen such that |aLength| elements can be inserted without |
291 | | // rehashing; if |aLength| is a power of two, this capacity will be |
292 | | // |2*aLength|. However, because entry storage is allocated lazily, this |
293 | | // initial capacity won't be relevant until the first element is added; prior |
294 | | // to that the capacity will be zero. |
295 | | // |
296 | | // This will crash if |aEntrySize| and/or |aLength| are too large. |
297 | | PLDHashTable(const PLDHashTableOps* aOps, uint32_t aEntrySize, |
298 | | uint32_t aLength = kDefaultInitialLength); |
299 | | |
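 | | // For example, a table of PLDHashEntryStub entries (declared at the end of |
 | | // this file) using the built-in stub ops; the length of 16 is arbitrary. |
 | | // With a power-of-two |aLength| of 16, the first allocation will hold 32 |
 | | // entries, but nothing is allocated until the first Add(). |
 | | // |
 | | //   PLDHashTable table(PLDHashTable::StubOps(), sizeof(PLDHashEntryStub), 16); |
 | | |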
300 | | PLDHashTable(PLDHashTable&& aOther) |
301 | | // Initialize fields which are checked by the move assignment operator |
302 | | // and the destructor (which the move assignment operator calls). |
303 | | : mOps(nullptr) |
304 | | , mEntryStore() |
305 | | , mGeneration(0) |
306 | | , mEntrySize(0) |
307 | | #ifdef DEBUG |
308 | | , mChecker() |
309 | | #endif |
310 | 0 | { |
311 | 0 | *this = std::move(aOther); |
312 | 0 | } |
313 | | |
314 | | PLDHashTable& operator=(PLDHashTable&& aOther); |
315 | | |
316 | | ~PLDHashTable(); |
317 | | |
318 | | // This should be used rarely. |
319 | | const PLDHashTableOps* Ops() const |
320 | 0 | { |
321 | 0 | return mozilla::recordreplay::UnwrapPLDHashTableCallbacks(mOps); |
322 | 0 | } |
323 | | |
324 | | // Size in entries (gross, not net of free and removed sentinels) for table. |
325 | | // This can be zero if no elements have been added yet, in which case the |
326 | | // entry storage will not have yet been allocated. |
327 | | uint32_t Capacity() const |
328 | 17.1M | { |
329 | 17.1M | return mEntryStore.Get() ? CapacityFromHashShift() : 0; |
330 | 17.1M | } |
331 | | |
332 | 0 | uint32_t EntrySize() const { return mEntrySize; } |
333 | 15.0M | uint32_t EntryCount() const { return mEntryCount; } |
334 | 0 | uint32_t Generation() const { return mGeneration; } |
335 | | |
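 | | // A sketch of the Generation() protocol described in the class comment; |
 | | // |table|, |entry| and |otherKey| are assumed to exist already. |
 | | // |
 | | //   uint32_t gen = table.Generation(); |
 | | //   table.Add(otherKey); // may reallocate the entry storage |
 | | //   if (table.Generation() == gen) { |
 | | //     // The entry storage was not reallocated, so |entry| is still valid. |
 | | //   } |
 | | |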
336 | | // To search for a |key| in |table|, call: |
337 | | // |
338 | | // entry = table.Search(key); |
339 | | // |
340 | | // If |entry| is non-null, |key| was found. If |entry| is null, key was not |
341 | | // found. |
342 | | PLDHashEntryHdr* Search(const void* aKey) const; |
343 | | |
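 | | // For example, with the hypothetical FooEntry sub-type sketched earlier: |
 | | // |
 | | //   auto* entry = static_cast<FooEntry*>(table.Search(key)); |
 | | //   if (entry) { |
 | | //     // |key| is present; use entry->mValue. |
 | | //   } |
 | | |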
344 | | // To add an entry identified by |key| to table, call: |
345 | | // |
346 | | // entry = table.Add(key, mozilla::fallible); |
347 | | // |
348 | | // If |entry| is null upon return, then the table is severely overloaded and |
349 | | // memory can't be allocated for entry storage. |
350 | | // |
351 | | // Otherwise, |aEntry->mKeyHash| has been set so that |
352 | | // PLDHashTable::EntryIsFree(entry) is false, and it is up to the caller to |
353 | | // initialize the key and value parts of the entry sub-type, if they have not |
354 | | // been set already (i.e. if entry was not already in the table, and if the |
355 | | // optional initEntry hook was not used). |
356 | | PLDHashEntryHdr* Add(const void* aKey, const mozilla::fallible_t&); |
357 | | |
358 | | // This is like the other Add() function, but infallible, and so never |
359 | | // returns null. |
360 | | PLDHashEntryHdr* Add(const void* aKey); |
361 | | |
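 | | // A possible fallible-Add pattern, again using the hypothetical FooEntry |
 | | // and assuming a clearEntry hook that zeroes the entry (as the stubs do): |
 | | // |
 | | //   auto* entry = static_cast<FooEntry*>(table.Add(key, mozilla::fallible)); |
 | | //   if (!entry) { |
 | | //     return false; // entry storage could not be allocated or grown |
 | | //   } |
 | | //   if (!entry->mKey) { // newly added entries still have null members |
 | | //     entry->mKey = key; |
 | | //     entry->mValue = 0; |
 | | //   } |
 | | |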
362 | | // To remove an entry identified by |key| from table, call: |
363 | | // |
364 | | // table.Remove(key); |
365 | | // |
366 | | // If |key|'s entry is found, it is cleared (via table->mOps->clearEntry). |
367 | | // The table's capacity may be reduced afterwards. |
368 | | void Remove(const void* aKey); |
369 | | |
370 | | // To remove an entry found by a prior search, call: |
371 | | // |
372 | | // table.RemoveEntry(entry); |
373 | | // |
374 | | // The entry, which must be present and in use, is cleared (via |
375 | | // table->mOps->clearEntry). The table's capacity may be reduced afterwards. |
376 | | void RemoveEntry(PLDHashEntryHdr* aEntry); |
377 | | |
378 | | // Remove an entry already accessed via Search() or Add(). |
379 | | // |
380 | | // NB: this is a "raw" or low-level method. It does not shrink the table if |
381 | | // it is underloaded. Don't use it unless necessary and you know what you are |
382 | | // doing, and if so, please explain in a comment why it is necessary instead |
383 | | // of RemoveEntry(). |
384 | | void RawRemove(PLDHashEntryHdr* aEntry); |
385 | | |
386 | | // This function is equivalent to |
387 | | // ClearAndPrepareForLength(kDefaultInitialLength). |
388 | | void Clear(); |
389 | | |
390 | | // This function clears the table's contents and frees its entry storage, |
391 | | // leaving it in an empty state ready to be used again. Afterwards, when the |
392 | | // first element is added the entry storage that gets allocated will have a |
393 | | // capacity large enough to fit |aLength| elements without rehashing. |
394 | | // |
395 | | // It's conceptually the same as calling the destructor and then re-calling |
396 | | // the constructor with the original |aOps| and |aEntrySize| arguments, and |
397 | | // a new |aLength| argument. |
398 | | void ClearAndPrepareForLength(uint32_t aLength); |
399 | | |
400 | | // Measure the size of the table's entry storage. If the entries contain |
401 | | // pointers to other heap blocks, you have to iterate over the table and |
402 | | // measure those separately; hence the "Shallow" prefix. |
403 | | size_t ShallowSizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; |
404 | | |
405 | | // Like ShallowSizeOfExcludingThis(), but includes sizeof(*this). |
406 | | size_t ShallowSizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; |
407 | | |
408 | | #ifdef DEBUG |
409 | | // Mark a table as immutable for the remainder of its lifetime. This |
410 | | // changes the implementation from asserting one set of invariants to |
411 | | // asserting a different set. |
412 | | void MarkImmutable(); |
413 | | #endif |
414 | | |
415 | | // If you use PLDHashEntryStub or a subclass of it as your entry struct, and |
416 | | // if your entries move via memcpy and clear via memset(0), you can use these |
417 | | // stub operations. |
418 | | static const PLDHashTableOps* StubOps(); |
419 | | |
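 | | // A sketch of the identity-keyed pattern this enables; |somePointer| is a |
 | | // placeholder, and the caller is assumed to fill in the key itself, since |
 | | // the stubs only hash and compare it: |
 | | // |
 | | //   PLDHashTable set(PLDHashTable::StubOps(), sizeof(PLDHashEntryStub)); |
 | | //   auto* e = static_cast<PLDHashEntryStub*>(set.Add(somePointer)); |
 | | //   e->key = somePointer; |
 | | |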
420 | | // The individual stub operations in StubOps(). |
421 | | static PLDHashNumber HashVoidPtrKeyStub(const void* aKey); |
422 | | static bool MatchEntryStub(const PLDHashEntryHdr* aEntry, const void* aKey); |
423 | | static void MoveEntryStub(PLDHashTable* aTable, const PLDHashEntryHdr* aFrom, |
424 | | PLDHashEntryHdr* aTo); |
425 | | static void ClearEntryStub(PLDHashTable* aTable, PLDHashEntryHdr* aEntry); |
426 | | |
427 | | // Hash/match operations for tables holding C strings. |
428 | | static PLDHashNumber HashStringKey(const void* aKey); |
429 | | static bool MatchStringKey(const PLDHashEntryHdr* aEntry, const void* aKey); |
430 | | |
431 | | // This is an iterator for PLDHashTable. Assertions will detect some, but not |
432 | | // all, mid-iteration table modifications that might invalidate (e.g. |
433 | | // reallocate) the entry storage. |
434 | | // |
435 | | // Any element can be removed during iteration using Remove(). If any |
436 | | // elements are removed, the table may be resized once iteration ends. |
437 | | // |
438 | | // Example usage: |
439 | | // |
440 | | // for (auto iter = table.Iter(); !iter.Done(); iter.Next()) { |
441 | | // auto entry = static_cast<FooEntry*>(iter.Get()); |
442 | | // // ... do stuff with |entry| ... |
443 | | // // ... possibly call iter.Remove() once ... |
444 | | // } |
445 | | // |
446 | | // or: |
447 | | // |
448 | | // for (PLDHashTable::Iterator iter(&table); !iter.Done(); iter.Next()) { |
449 | | // auto entry = static_cast<FooEntry*>(iter.Get()); |
450 | | // // ... do stuff with |entry| ... |
451 | | // // ... possibly call iter.Remove() once ... |
452 | | // } |
453 | | // |
454 | | // The latter form is more verbose but is easier to work with when |
455 | | // making subclasses of Iterator. |
456 | | // |
457 | | class Iterator |
458 | | { |
459 | | public: |
460 | | explicit Iterator(PLDHashTable* aTable); |
461 | | Iterator(Iterator&& aOther); |
462 | | ~Iterator(); |
463 | | |
464 | | // Have we finished? |
465 | 19.7M | bool Done() const { return mNexts == mNextsLimit; } |
466 | | |
467 | | // Get the current entry. |
468 | | PLDHashEntryHdr* Get() const |
469 | 16.0M | { |
470 | 16.0M | MOZ_ASSERT(!Done()); |
471 | 16.0M | |
472 | 16.0M | PLDHashEntryHdr* entry = reinterpret_cast<PLDHashEntryHdr*>(mCurrent); |
473 | 16.0M | MOZ_ASSERT(EntryIsLive(entry)); |
474 | 16.0M | return entry; |
475 | 16.0M | } |
476 | | |
477 | | // Advance to the next entry. |
478 | | void Next(); |
479 | | |
480 | | // Remove the current entry. Must only be called once per entry, and Get() |
481 | | // must not be called on that entry afterwards. |
482 | | void Remove(); |
483 | | |
484 | | protected: |
485 | | PLDHashTable* mTable; // Main table pointer. |
486 | | |
487 | | private: |
488 | | char* mStart; // The first entry. |
489 | | char* mLimit; // One past the last entry. |
490 | | char* mCurrent; // Pointer to the current entry. |
491 | | uint32_t mNexts; // Number of Next() calls. |
492 | | uint32_t mNextsLimit; // Next() call limit. |
493 | | |
494 | | bool mHaveRemoved; // Have any elements been removed? |
495 | | |
496 | | bool IsOnNonLiveEntry() const; |
497 | | void MoveToNextEntry(); |
498 | | |
499 | | Iterator() = delete; |
500 | | Iterator(const Iterator&) = delete; |
501 | | Iterator& operator=(const Iterator&) = delete; |
502 | | Iterator& operator=(const Iterator&&) = delete; |
503 | | }; |
504 | | |
505 | 162 | Iterator Iter() { return Iterator(this); } |
506 | | |
507 | | // Use this if you need to initialize an Iterator in a const method. If you |
508 | | // do, you should not call Remove() on the iterator. |
509 | | Iterator ConstIter() const |
510 | 0 | { |
511 | 0 | return Iterator(const_cast<PLDHashTable*>(this)); |
512 | 0 | } |
513 | | |
514 | | private: |
515 | | static uint32_t HashShift(uint32_t aEntrySize, uint32_t aLength); |
516 | | |
517 | | static const PLDHashNumber kCollisionFlag = 1; |
518 | | |
519 | | static bool EntryIsFree(const PLDHashEntryHdr* aEntry) |
520 | 97.8M | { |
521 | 97.8M | return aEntry->mKeyHash == 0; |
522 | 97.8M | } |
523 | | static bool EntryIsRemoved(const PLDHashEntryHdr* aEntry) |
524 | 18.8M | { |
525 | 18.8M | return aEntry->mKeyHash == 1; |
526 | 18.8M | } |
527 | | static bool EntryIsLive(const PLDHashEntryHdr* aEntry) |
528 | 54.9M | { |
529 | 54.9M | return aEntry->mKeyHash >= 2; |
530 | 54.9M | } |
531 | | |
532 | | static void MarkEntryFree(PLDHashEntryHdr* aEntry) |
533 | 4.03M | { |
534 | 4.03M | aEntry->mKeyHash = 0; |
535 | 4.03M | } |
536 | | static void MarkEntryRemoved(PLDHashEntryHdr* aEntry) |
537 | 3.74M | { |
538 | 3.74M | aEntry->mKeyHash = 1; |
539 | 3.74M | } |
540 | | |
541 | | PLDHashNumber Hash1(PLDHashNumber aHash0) const; |
542 | | void Hash2(PLDHashNumber aHash, |
543 | | uint32_t& aHash2Out, uint32_t& aSizeMaskOut) const; |
544 | | |
545 | | static bool MatchEntryKeyhash(const PLDHashEntryHdr* aEntry, |
546 | | const PLDHashNumber aHash); |
547 | | PLDHashEntryHdr* AddressEntry(uint32_t aIndex) const; |
548 | | |
549 | | // We store mHashShift rather than sizeLog2 to optimize the collision-free |
550 | | // case in SearchTable. |
551 | | uint32_t CapacityFromHashShift() const |
552 | 17.1M | { |
553 | 17.1M | return ((uint32_t)1 << (kPLDHashNumberBits - mHashShift)); |
554 | 17.1M | } |
555 | | |
556 | | PLDHashNumber ComputeKeyHash(const void* aKey) const; |
557 | | |
558 | | enum SearchReason { ForSearchOrRemove, ForAdd }; |
559 | | |
560 | | template <SearchReason Reason> |
561 | | PLDHashEntryHdr* NS_FASTCALL |
562 | | SearchTable(const void* aKey, PLDHashNumber aKeyHash) const; |
563 | | |
564 | | PLDHashEntryHdr* FindFreeEntry(PLDHashNumber aKeyHash) const; |
565 | | |
566 | | bool ChangeTable(int aDeltaLog2); |
567 | | |
568 | | void ShrinkIfAppropriate(); |
569 | | |
570 | | PLDHashTable(const PLDHashTable& aOther) = delete; |
571 | | PLDHashTable& operator=(const PLDHashTable& aOther) = delete; |
572 | | }; |
573 | | |
574 | | // Compute the hash code for a given key to be looked up, added, or removed. |
575 | | // A hash code may have any PLDHashNumber value. |
576 | | typedef PLDHashNumber (*PLDHashHashKey)(const void* aKey); |
577 | | |
578 | | // Compare the key identifying aEntry with the provided key parameter. Return |
579 | | // true if keys match, false otherwise. |
580 | | typedef bool (*PLDHashMatchEntry)(const PLDHashEntryHdr* aEntry, |
581 | | const void* aKey); |
582 | | |
583 | | // Copy the data starting at aFrom to the new entry storage at aTo. Do not add |
584 | | // reference counts for any strong references in the entry, however, as this |
585 | | // is a "move" operation: the old entry storage at aFrom will be freed without |
586 | | // any reference-decrementing callback shortly. |
587 | | typedef void (*PLDHashMoveEntry)(PLDHashTable* aTable, |
588 | | const PLDHashEntryHdr* aFrom, |
589 | | PLDHashEntryHdr* aTo); |
590 | | |
591 | | // Clear the entry and drop any strong references it holds. This callback is |
592 | | // invoked by Remove(), but only if the given key is found in the table. |
593 | | typedef void (*PLDHashClearEntry)(PLDHashTable* aTable, |
594 | | PLDHashEntryHdr* aEntry); |
595 | | |
596 | | // Initialize a new entry, apart from mKeyHash. This function is called when |
597 | | // Add() finds no existing entry for the given key, and must add a new one. At |
598 | | // that point, |aEntry->mKeyHash| is not set yet, to avoid claiming the last |
599 | | // free entry in a severely overloaded table. |
600 | | typedef void (*PLDHashInitEntry)(PLDHashEntryHdr* aEntry, const void* aKey); |
601 | | |
602 | | // Finally, the "vtable" structure for PLDHashTable. The first four hooks |
603 | | // must be provided by implementations; they're called unconditionally by the |
604 | | // generic PLDHashTable.cpp code. Hooks after these may be null. |
605 | | // |
606 | | // Summary of allocation-related hook usage with C++ placement new emphasis: |
607 | | // initEntry Call placement new using default key-based ctor. |
608 | | // moveEntry Call placement new using copy ctor, run dtor on old |
609 | | // entry storage. |
610 | | // clearEntry Run dtor on entry. |
611 | | // |
612 | | // Note the reason why initEntry is optional: the default hooks (stubs) clear |
613 | | // entry storage: On successful Add(tbl, key), the returned entry pointer |
614 | | // addresses an entry struct whose mKeyHash member has been set non-zero, but |
615 | | // all other entry members are still clear (null). Add() callers can test such |
616 | | // members to see whether the entry was newly created by the Add() call that |
617 | | // just succeeded. If placement new or similar initialization is required, |
618 | | // define an |initEntry| hook. Of course, the |clearEntry| hook must zero or |
619 | | // null appropriately. |
620 | | // |
621 | | // XXX assumes 0 is null for pointer types. |
622 | | struct PLDHashTableOps |
623 | | { |
624 | | // Mandatory hooks. All implementations must provide these. |
625 | | PLDHashHashKey hashKey; |
626 | | PLDHashMatchEntry matchEntry; |
627 | | PLDHashMoveEntry moveEntry; |
628 | | PLDHashClearEntry clearEntry; |
629 | | |
630 | | // Optional hooks start here. If null, these are not called. |
631 | | PLDHashInitEntry initEntry; |
632 | | }; |
633 | | |
634 | | // A minimal entry is a subclass of PLDHashEntryHdr and has a void* key pointer. |
635 | | struct PLDHashEntryStub : public PLDHashEntryHdr |
636 | | { |
637 | | const void* key; |
638 | | }; |
639 | | |
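 | | // A plausible ops table for a PLDHashEntryStub-style entry keyed on C |
 | | // strings, combining the string hash/match helpers above with the |
 | | // memcpy/memset stubs; the name kStringOps and the choice to omit the |
 | | // initEntry hook are illustrative only: |
 | | // |
 | | //   static const PLDHashTableOps kStringOps = { |
 | | //     PLDHashTable::HashStringKey, |
 | | //     PLDHashTable::MatchStringKey, |
 | | //     PLDHashTable::MoveEntryStub, |
 | | //     PLDHashTable::ClearEntryStub, |
 | | //     nullptr // no initEntry hook; new entries start out zeroed |
 | | //   }; |
 | | |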
640 | | #endif /* PLDHashTable_h */ |