/src/hermes/lib/VM/OrderedHashMap.cpp
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) Meta Platforms, Inc. and affiliates. |
3 | | * |
4 | | * This source code is licensed under the MIT license found in the |
5 | | * LICENSE file in the root directory of this source tree. |
6 | | */ |
7 | | |
8 | | #include "hermes/VM/OrderedHashMap.h" |
9 | | |
10 | | #include "hermes/Support/ErrorHandling.h" |
11 | | #include "hermes/VM/BuildMetadata.h" |
12 | | #include "hermes/VM/GCPointer-inline.h" |
13 | | #include "hermes/VM/Operations.h" |
14 | | |
15 | | namespace hermes { |
16 | | namespace vm { |
17 | | //===----------------------------------------------------------------------===// |
18 | | // class HashMapEntry |
19 | | |
// Static VTable for HashMapEntry cells. Only the cell kind and fixed cell
// size are supplied; all other VTable hooks are left at their defaults.
const VTable HashMapEntry::vt{
    CellKind::HashMapEntryKind,
    cellSize<HashMapEntry>()};
23 | | |
24 | 1 | void HashMapEntryBuildMeta(const GCCell *cell, Metadata::Builder &mb) { |
25 | 1 | const auto *self = static_cast<const HashMapEntry *>(cell); |
26 | 1 | mb.setVTable(&HashMapEntry::vt); |
27 | 1 | mb.addField("key", &self->key); |
28 | 1 | mb.addField("value", &self->value); |
29 | 1 | mb.addField("prevIterationEntry", &self->prevIterationEntry); |
30 | 1 | mb.addField("nextIterationEntry", &self->nextIterationEntry); |
31 | 1 | mb.addField("nextEntryInBucket", &self->nextEntryInBucket); |
32 | 1 | } |
33 | | |
34 | 0 | CallResult<PseudoHandle<HashMapEntry>> HashMapEntry::create(Runtime &runtime) { |
35 | 0 | return createPseudoHandle(runtime.makeAFixed<HashMapEntry>()); |
36 | 0 | } |
37 | | |
38 | | //===----------------------------------------------------------------------===// |
39 | | // class OrderedHashMap |
40 | | |
// Static VTable for OrderedHashMap cells. Only the cell kind and fixed
// cell size are supplied; all other VTable hooks are left at their defaults.
const VTable OrderedHashMap::vt{
    CellKind::OrderedHashMapKind,
    cellSize<OrderedHashMap>()};
44 | | |
45 | 1 | void OrderedHashMapBuildMeta(const GCCell *cell, Metadata::Builder &mb) { |
46 | 1 | const auto *self = static_cast<const OrderedHashMap *>(cell); |
47 | 1 | mb.setVTable(&OrderedHashMap::vt); |
48 | 1 | mb.addField("hashTable", &self->hashTable_); |
49 | 1 | mb.addField("firstIterationEntry", &self->firstIterationEntry_); |
50 | 1 | mb.addField("lastIterationEntry", &self->lastIterationEntry_); |
51 | 1 | } |
52 | | |
/// Construct an OrderedHashMap whose bucket array is \p hashTableStorage.
/// All other members rely on their in-class defaults.
OrderedHashMap::OrderedHashMap(
    Runtime &runtime,
    Handle<ArrayStorageSmall> hashTableStorage)
    : hashTable_(runtime, hashTableStorage.get(), runtime.getHeap()) {}
57 | | |
58 | | CallResult<PseudoHandle<OrderedHashMap>> OrderedHashMap::create( |
59 | 113 | Runtime &runtime) { |
60 | 113 | auto arrRes = |
61 | 113 | ArrayStorageSmall::create(runtime, INITIAL_CAPACITY, INITIAL_CAPACITY); |
62 | 113 | if (LLVM_UNLIKELY(arrRes == ExecutionStatus::EXCEPTION)) { |
63 | 0 | return ExecutionStatus::EXCEPTION; |
64 | 0 | } |
65 | 113 | auto hashTableStorage = runtime.makeHandle<ArrayStorageSmall>(*arrRes); |
66 | | |
67 | 113 | return createPseudoHandle( |
68 | 113 | runtime.makeAFixed<OrderedHashMap>(runtime, hashTableStorage)); |
69 | 113 | } |
70 | | |
/// Unlink \p entry from the iteration doubly-linked list. The caller must
/// guarantee \p entry is not the current lastIterationEntry_ (the last
/// entry is deliberately retained so live iterators can resume from it).
void OrderedHashMap::removeLinkedListNode(
    Runtime &runtime,
    HashMapEntry *entry,
    GC &gc) {
  assert(
      entry != lastIterationEntry_.get(runtime) &&
      "Cannot remove the last entry");
  // Splice the predecessor past this entry, if there is one.
  if (entry->prevIterationEntry) {
    entry->prevIterationEntry.getNonNull(runtime)->nextIterationEntry.set(
        runtime, entry->nextIterationEntry, gc);
  }
  // Point the successor back at the predecessor, if there is one.
  if (entry->nextIterationEntry) {
    entry->nextIterationEntry.getNonNull(runtime)->prevIterationEntry.set(
        runtime, entry->prevIterationEntry, gc);
  }
  // Removing the head advances the head to the next entry.
  if (entry == firstIterationEntry_.get(runtime)) {
    firstIterationEntry_.set(runtime, entry->nextIterationEntry, gc);
  }
  // Only the back-pointer is cleared; nextIterationEntry is left intact so
  // an iterator currently parked on this entry can still advance past it.
  entry->prevIterationEntry.setNull(runtime.getHeap());
}
91 | | |
92 | 0 | static HashMapEntry *castToMapEntry(SmallHermesValue shv, PointerBase &base) { |
93 | 0 | if (shv.isEmpty()) |
94 | 0 | return nullptr; |
95 | 0 | return vmcast<HashMapEntry>(shv.getObject(base)); |
96 | 0 | } |
97 | | |
98 | | HashMapEntry *OrderedHashMap::lookupInBucket( |
99 | | Runtime &runtime, |
100 | | uint32_t bucket, |
101 | 0 | HermesValue key) { |
102 | 0 | assert( |
103 | 0 | hashTable_.getNonNull(runtime)->size() == capacity_ && |
104 | 0 | "Inconsistent capacity"); |
105 | 0 | auto *entry = |
106 | 0 | castToMapEntry(hashTable_.getNonNull(runtime)->at(bucket), runtime); |
107 | 0 | while (entry && !isSameValueZero(entry->key, key)) { |
108 | 0 | entry = entry->nextEntryInBucket.get(runtime); |
109 | 0 | } |
110 | 0 | return entry; |
111 | 0 | } |
112 | | |
113 | | ExecutionStatus OrderedHashMap::rehashIfNecessary( |
114 | | Handle<OrderedHashMap> self, |
115 | 0 | Runtime &runtime) { |
116 | 0 | uint32_t newCapacity = self->capacity_; |
117 | | // NOTE: we have ensured that self->capacity_ * 4 never overflows uint32_t by |
118 | | // setting MAX_CAPACITY to the appropriate value. self->size_ is always <= |
119 | | // self->capacity_, so this applies to self->size_ as well. |
120 | 0 | static_assert( |
121 | 0 | MAX_CAPACITY < UINT32_MAX / 4, |
122 | 0 | "Avoid overflow checks on multiplying capacity by 4"); |
123 | 0 | if (self->size_ * 4 > self->capacity_ * 3) { |
124 | | // Load factor is more than 0.75, need to increase the capacity. |
125 | 0 | newCapacity = self->capacity_ * 2; |
126 | 0 | if (LLVM_UNLIKELY(newCapacity > MAX_CAPACITY)) { |
127 | | // We maintain the invariant that the capacity_ is a power of two. |
128 | | // Therefore, if doubling would exceed the max, we revert to the |
129 | | // previous value. (Assert the invariant to make it clear.) |
130 | 0 | assert( |
131 | 0 | (self->capacity_ & (self->capacity_ - 1)) == 0 && |
132 | 0 | "capacity_ must be power of 2"); |
133 | 0 | newCapacity = self->capacity_; |
134 | 0 | } |
135 | 0 | } else if ( |
136 | 0 | self->size_ * 4 < self->capacity_ && self->capacity_ > INITIAL_CAPACITY) { |
137 | | // Load factor is less than 0.25, and we are not at initial cap. |
138 | 0 | newCapacity = self->capacity_ / 2; |
139 | 0 | } |
140 | 0 | if (newCapacity == self->capacity_) { |
141 | | // No need to rehash. |
142 | 0 | return ExecutionStatus::RETURNED; |
143 | 0 | } |
144 | 0 | assert( |
145 | 0 | self->size_ && self->firstIterationEntry_ && self->lastIterationEntry_ && |
146 | 0 | "We should never be rehashing when there are no elements"); |
147 | | |
148 | | // Set new capacity first to update the hash function. |
149 | 0 | self->capacity_ = newCapacity; |
150 | | |
151 | | // Create a new hash table. |
152 | 0 | auto arrRes = ArrayStorageSmall::create(runtime, newCapacity, newCapacity); |
153 | 0 | if (LLVM_UNLIKELY(arrRes == ExecutionStatus::EXCEPTION)) { |
154 | 0 | return ExecutionStatus::EXCEPTION; |
155 | 0 | } |
156 | 0 | auto newHashTable = runtime.makeHandle<ArrayStorageSmall>(*arrRes); |
157 | | |
158 | | // Now re-add all entries to the hash table. |
159 | 0 | MutableHandle<HashMapEntry> entry{runtime}; |
160 | 0 | MutableHandle<HashMapEntry> oldNextInBucket{runtime}; |
161 | 0 | MutableHandle<> keyHandle{runtime}; |
162 | 0 | GCScopeMarkerRAII marker{runtime}; |
163 | 0 | for (unsigned i = 0, len = self->hashTable_.getNonNull(runtime)->size(); |
164 | 0 | i < len; |
165 | 0 | ++i) { |
166 | 0 | entry = |
167 | 0 | castToMapEntry(self->hashTable_.getNonNull(runtime)->at(i), runtime); |
168 | 0 | while (entry) { |
169 | 0 | marker.flush(); |
170 | 0 | keyHandle = entry->key; |
171 | 0 | uint32_t bucket = hashToBucket(self, runtime, keyHandle); |
172 | 0 | oldNextInBucket = entry->nextEntryInBucket.get(runtime); |
173 | 0 | if (newHashTable->at(bucket).isEmpty()) { |
174 | | // Empty bucket. |
175 | 0 | entry->nextEntryInBucket.setNull(runtime.getHeap()); |
176 | 0 | } else { |
177 | | // There are already a bucket head. |
178 | 0 | entry->nextEntryInBucket.set( |
179 | 0 | runtime, |
180 | 0 | vmcast<HashMapEntry>(newHashTable->at(bucket).getObject(runtime)), |
181 | 0 | runtime.getHeap()); |
182 | 0 | } |
183 | | // Update bucket head to the new entry. |
184 | 0 | newHashTable->set( |
185 | 0 | bucket, |
186 | 0 | SmallHermesValue::encodeObjectValue(*entry, runtime), |
187 | 0 | runtime.getHeap()); |
188 | |
|
189 | 0 | entry = *oldNextInBucket; |
190 | 0 | } |
191 | 0 | } |
192 | |
|
193 | 0 | self->hashTable_.setNonNull(runtime, newHashTable.get(), runtime.getHeap()); |
194 | 0 | assert( |
195 | 0 | self->hashTable_.getNonNull(runtime)->size() == self->capacity_ && |
196 | 0 | "Inconsistent capacity"); |
197 | 0 | return ExecutionStatus::RETURNED; |
198 | 0 | } |
199 | | |
200 | | bool OrderedHashMap::has( |
201 | | Handle<OrderedHashMap> self, |
202 | | Runtime &runtime, |
203 | 0 | Handle<> key) { |
204 | 0 | auto bucket = hashToBucket(self, runtime, key); |
205 | 0 | return self->lookupInBucket(runtime, bucket, key.getHermesValue()); |
206 | 0 | } |
207 | | |
208 | | HashMapEntry *OrderedHashMap::find( |
209 | | Handle<OrderedHashMap> self, |
210 | | Runtime &runtime, |
211 | 0 | Handle<> key) { |
212 | 0 | auto bucket = hashToBucket(self, runtime, key); |
213 | 0 | return self->lookupInBucket(runtime, bucket, key.getHermesValue()); |
214 | 0 | } |
215 | | |
216 | | HermesValue OrderedHashMap::get( |
217 | | Handle<OrderedHashMap> self, |
218 | | Runtime &runtime, |
219 | 0 | Handle<> key) { |
220 | 0 | auto *entry = find(self, runtime, key); |
221 | 0 | if (!entry) { |
222 | 0 | return HermesValue::encodeUndefinedValue(); |
223 | 0 | } |
224 | 0 | return entry->value; |
225 | 0 | } |
226 | | |
/// Insert \p key -> \p value. If the key already exists its value is
/// updated in place; otherwise a new entry is created and linked into both
/// the bucket chain and the tail of the iteration list, then the table is
/// rehashed if the load factor crossed its threshold.
ExecutionStatus OrderedHashMap::insert(
    Handle<OrderedHashMap> self,
    Runtime &runtime,
    Handle<> key,
    Handle<> value) {
  uint32_t bucket = hashToBucket(self, runtime, key);
  if (auto *entry =
          self->lookupInBucket(runtime, bucket, key.getHermesValue())) {
    // Element already exists, update value and return.
    entry->value.set(value.get(), runtime.getHeap());
    return ExecutionStatus::RETURNED;
  }
  // Create a new entry, set the key and value.
  auto crtRes = HashMapEntry::create(runtime);
  if (LLVM_UNLIKELY(crtRes == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  auto newMapEntry = runtime.makeHandle(std::move(*crtRes));
  newMapEntry->key.set(key.get(), runtime.getHeap());
  newMapEntry->value.set(value.get(), runtime.getHeap());
  auto *curBucketFront =
      castToMapEntry(self->hashTable_.getNonNull(runtime)->at(bucket), runtime);
  if (curBucketFront) {
    // If the bucket we are inserting to is not empty, we maintain the
    // linked list properly.
    newMapEntry->nextEntryInBucket.set(
        runtime, curBucketFront, runtime.getHeap());
  }
  // Set the newly inserted entry as the front of this bucket chain.
  self->hashTable_.getNonNull(runtime)->set(
      bucket,
      SmallHermesValue::encodeObjectValue(*newMapEntry, runtime),
      runtime.getHeap());

  if (!self->firstIterationEntry_) {
    // If we are inserting the first ever element, update
    // first iteration entry pointer.
    self->firstIterationEntry_.set(
        runtime, newMapEntry.get(), runtime.getHeap());
    self->lastIterationEntry_.set(
        runtime, newMapEntry.get(), runtime.getHeap());
  } else {
    // Connect the new entry with the last entry.
    self->lastIterationEntry_.getNonNull(runtime)->nextIterationEntry.set(
        runtime, newMapEntry.get(), runtime.getHeap());
    newMapEntry->prevIterationEntry.set(
        runtime, self->lastIterationEntry_, runtime.getHeap());

    // Capture the old tail before moving lastIterationEntry_ forward.
    HashMapEntry *previousLastEntry = self->lastIterationEntry_.get(runtime);
    self->lastIterationEntry_.set(
        runtime, newMapEntry.get(), runtime.getHeap());

    if (previousLastEntry && previousLastEntry->isDeleted()) {
      // If the last entry was a deleted entry, we no longer need to keep it.
      // (erase()/clear() keep the last entry as a tombstone for iterators;
      // now that a live entry follows it, it can safely be unlinked.)
      self->removeLinkedListNode(runtime, previousLastEntry, runtime.getHeap());
    }
  }

  self->size_++;
  return self->rehashIfNecessary(self, runtime);
}
288 | | |
/// Erase the entry for \p key. Returns true if an entry was found and
/// removed, false if the key was not present.
bool OrderedHashMap::erase(
    Handle<OrderedHashMap> self,
    Runtime &runtime,
    Handle<> key) {
  uint32_t bucket = hashToBucket(self, runtime, key);
  // Scan the bucket chain, tracking the predecessor so we can unlink.
  HashMapEntry *prevEntry = nullptr;
  auto *entry =
      castToMapEntry(self->hashTable_.getNonNull(runtime)->at(bucket), runtime);
  while (entry && !isSameValueZero(entry->key, key.getHermesValue())) {
    prevEntry = entry;
    entry = entry->nextEntryInBucket.get(runtime);
  }
  if (!entry) {
    // Element does not exist.
    return false;
  }

  if (prevEntry) {
    // The entry we are deleting has a previous entry, update the link.
    prevEntry->nextEntryInBucket.set(
        runtime, entry->nextEntryInBucket, runtime.getHeap());
  } else {
    // The entry we are erasing is the front entry in the bucket, we need
    // to update the bucket head to the next entry in the bucket, if not
    // any, set to empty value.
    self->hashTable_.getNonNull(runtime)->set(
        bucket,
        entry->nextEntryInBucket
            ? SmallHermesValue::encodeObjectValue(entry->nextEntryInBucket)
            : SmallHermesValue::encodeEmptyValue(),
        runtime.getHeap());
  }

  // Mark the entry as a tombstone; iteratorNext() skips deleted entries.
  entry->markDeleted(runtime);
  self->size_--;

  // Now delete the entry from the iteration linked list.
  // If we are deleting the last entry, don't remove it from the list yet.
  // This will ensure that if any iterator out there is currently pointing to
  // this entry, it will be able to follow on if we add new entries in the
  // future.
  if (entry != self->lastIterationEntry_.get(runtime)) {
    self->removeLinkedListNode(runtime, entry, runtime.getHeap());
  }

  // NOTE(review): the ExecutionStatus from rehashIfNecessary is discarded
  // here, so an allocation failure while shrinking goes unreported —
  // confirm this is intentional.
  self->rehashIfNecessary(self, runtime);

  return true;
}
338 | | |
339 | | HashMapEntry *OrderedHashMap::iteratorNext( |
340 | | Runtime &runtime, |
341 | 0 | HashMapEntry *entry) const { |
342 | 0 | if (entry == nullptr) { |
343 | | // Starting a new iteration from the first entry. |
344 | 0 | entry = firstIterationEntry_.get(runtime); |
345 | 0 | } else { |
346 | | // Advance to the next entry. |
347 | 0 | entry = entry->nextIterationEntry.get(runtime); |
348 | 0 | } |
349 | | |
350 | | // Make sure the entry we returned (if not nullptr) must not be deleted. |
351 | 0 | while (entry && entry->isDeleted()) { |
352 | 0 | entry = entry->nextIterationEntry.get(runtime); |
353 | 0 | } |
354 | 0 | return entry; |
355 | 0 | } |
356 | | |
/// Remove all entries, mark them deleted, and shrink the bucket array back
/// to INITIAL_CAPACITY. The most recent entry is kept (as a tombstone) so
/// outstanding iterators can still rejoin the chain if entries are added
/// later.
void OrderedHashMap::clear(Runtime &runtime) {
  if (!firstIterationEntry_) {
    // Empty set.
    return;
  }

  // Clear the hash table.
  for (unsigned i = 0; i < capacity_; ++i) {
    auto entry = castToMapEntry(hashTable_.getNonNull(runtime)->at(i), runtime);
    // Delete every element reachable from the hash table.
    while (entry) {
      entry->markDeleted(runtime);
      entry = entry->nextEntryInBucket.get(runtime);
    }
    // Clear every element in the hash table.
    hashTable_.getNonNull(runtime)->setNonPtr(
        i, SmallHermesValue::encodeEmptyValue(), runtime.getHeap());
  }
  // Resize the hash table to the initial size.
  ArrayStorageSmall::resizeWithinCapacity(
      hashTable_.getNonNull(runtime), runtime, INITIAL_CAPACITY);
  capacity_ = INITIAL_CAPACITY;

  // After clearing, we will still keep the last deleted entry
  // in case there is an iterator out there
  // pointing to the middle of the iteration chain. We need it to be
  // able to merge back eventually.
  firstIterationEntry_.set(runtime, lastIterationEntry_, runtime.getHeap());
  firstIterationEntry_.getNonNull(runtime)->prevIterationEntry.setNull(
      runtime.getHeap());
  size_ = 0;
}
389 | | |
390 | | } // namespace vm |
391 | | } // namespace hermes |