/src/php-src/Zend/zend_hash.c
Line | Count | Source |
1 | | /* |
2 | | +----------------------------------------------------------------------+ |
3 | | | Zend Engine | |
4 | | +----------------------------------------------------------------------+ |
5 | | | Copyright (c) Zend Technologies Ltd. (http://www.zend.com) | |
6 | | +----------------------------------------------------------------------+ |
7 | | | This source file is subject to version 2.00 of the Zend license, | |
8 | | | that is bundled with this package in the file LICENSE, and is | |
9 | | | available through the world-wide-web at the following url: | |
10 | | | http://www.zend.com/license/2_00.txt. | |
11 | | | If you did not receive a copy of the Zend license and are unable to | |
12 | | | obtain it through the world-wide-web, please send a note to | |
13 | | | license@zend.com so we can mail you a copy immediately. | |
14 | | +----------------------------------------------------------------------+ |
15 | | | Authors: Andi Gutmans <andi@php.net> | |
16 | | | Zeev Suraski <zeev@php.net> | |
17 | | | Dmitry Stogov <dmitry@php.net> | |
18 | | +----------------------------------------------------------------------+ |
19 | | */ |
20 | | |
21 | | #include "zend.h" |
22 | | #include "zend_globals.h" |
23 | | #include "zend_variables.h" |
24 | | |
25 | | #if defined(__aarch64__) || defined(_M_ARM64) |
26 | | # include <arm_neon.h> |
27 | | #endif |
28 | | |
29 | | /* Prefer to use AVX2 instructions for better latency and throughput */ |
30 | | #if defined(__AVX2__) |
31 | | # include <immintrin.h> |
32 | | #elif defined( __SSE2__) |
33 | | # include <emmintrin.h> |
34 | | #endif |
35 | | |
36 | | #if ZEND_DEBUG |
37 | | # define HT_ASSERT(ht, expr) \ |
38 | 4.93M | ZEND_ASSERT((expr) || (HT_FLAGS(ht) & HASH_FLAG_ALLOW_COW_VIOLATION)) |
39 | | #else |
40 | | # define HT_ASSERT(ht, expr) |
41 | | #endif |
42 | | |
43 | 3.19M | #define HT_ASSERT_RC1(ht) HT_ASSERT(ht, GC_REFCOUNT(ht) == 1) |
44 | | |
45 | 0 | #define HT_POISONED_PTR ((HashTable *) (intptr_t) -1) |
46 | | |
47 | | #if ZEND_DEBUG |
48 | | |
49 | 9.84M | #define HT_OK 0x00 |
50 | 0 | #define HT_IS_DESTROYING 0x01 |
51 | 0 | #define HT_DESTROYED 0x02 |
52 | 0 | #define HT_CLEANING 0x03 |
53 | | |
54 | | static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line) |
55 | 9.84M | { |
56 | 9.84M | if ((HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) == HT_OK) { |
57 | 9.84M | return; |
58 | 9.84M | } |
59 | 0 | switch (HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) { |
60 | 0 | case HT_IS_DESTROYING: |
61 | 0 | zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht); |
62 | 0 | break; |
63 | 0 | case HT_DESTROYED: |
64 | 0 | zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht); |
65 | 0 | break; |
66 | 0 | case HT_CLEANING: |
67 | 0 | zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht); |
68 | 0 | break; |
69 | 0 | default: |
70 | 0 | zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht); |
71 | 0 | break; |
72 | 0 | } |
73 | 0 | ZEND_UNREACHABLE(); |
74 | 0 | } |
75 | 9.84M | #define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__); |
76 | 1.56M | #define SET_INCONSISTENT(n) do { \ |
77 | 1.56M | HT_FLAGS(ht) = (HT_FLAGS(ht) & ~HASH_FLAG_CONSISTENCY) | (n); \ |
78 | 1.56M | } while (0) |
79 | | #else |
80 | | #define IS_CONSISTENT(a) |
81 | | #define SET_INCONSISTENT(n) |
82 | | #endif |
83 | | |
84 | | #define ZEND_HASH_IF_FULL_DO_RESIZE(ht) \ |
85 | 1.46M | if ((ht)->nNumUsed >= (ht)->nTableSize) { \ |
86 | 1.10k | zend_hash_do_resize(ht); \ |
87 | 1.10k | } |
88 | | |
89 | 209k | ZEND_API void *zend_hash_str_find_ptr_lc(const HashTable *ht, const char *str, size_t len) { |
90 | 209k | void *result; |
91 | 209k | char *lc_str; |
92 | | |
93 | | /* Stack allocate small strings to improve performance */ |
94 | 209k | ALLOCA_FLAG(use_heap) |
95 | | |
96 | 209k | lc_str = zend_str_tolower_copy(do_alloca(len + 1, use_heap), str, len); |
97 | 209k | result = zend_hash_str_find_ptr(ht, lc_str, len); |
98 | 209k | free_alloca(lc_str, use_heap); |
99 | | |
100 | 209k | return result; |
101 | 209k | } |
102 | | |
103 | 0 | ZEND_API void *zend_hash_find_ptr_lc(const HashTable *ht, zend_string *key) { |
104 | 0 | void *result; |
105 | 0 | zend_string *lc_key = zend_string_tolower(key); |
106 | 0 | result = zend_hash_find_ptr(ht, lc_key); |
107 | 0 | zend_string_release(lc_key); |
108 | 0 | return result; |
109 | 0 | } |
110 | | |
111 | | static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht); |
112 | | |
113 | | static zend_always_inline uint32_t zend_hash_check_size(uint32_t nSize) |
114 | 1.93M | { |
115 | | #ifdef ZEND_WIN32 |
116 | | unsigned long index; |
117 | | #endif |
118 | | |
119 | | /* Use big enough power of 2 */ |
120 | | /* size should be between HT_MIN_SIZE and HT_MAX_SIZE */ |
121 | 1.93M | if (nSize <= HT_MIN_SIZE) { |
122 | 1.59M | return HT_MIN_SIZE; |
123 | 1.59M | } else if (UNEXPECTED(nSize > HT_MAX_SIZE)) { |
124 | 0 | zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket)); |
125 | 0 | } |
126 | | |
127 | | #ifdef ZEND_WIN32 |
128 | | if (BitScanReverse(&index, nSize - 1)) { |
129 | | return 0x2u << ((31 - index) ^ 0x1f); |
130 | | } else { |
131 | | /* nSize is ensured to be in the valid range, fall back to it |
132 | | rather than using an undefined bit scan result. */ |
133 | | return nSize; |
134 | | } |
135 | | #elif (defined(__GNUC__) || __has_builtin(__builtin_clz)) && defined(PHP_HAVE_BUILTIN_CLZ) |
136 | 344k | return 0x2u << (__builtin_clz(nSize - 1) ^ 0x1f); |
137 | | #else |
138 | | nSize -= 1; |
139 | | nSize |= (nSize >> 1); |
140 | | nSize |= (nSize >> 2); |
141 | | nSize |= (nSize >> 4); |
142 | | nSize |= (nSize >> 8); |
143 | | nSize |= (nSize >> 16); |
144 | | return nSize + 1; |
145 | | #endif |
146 | 1.93M | } |
147 | | |
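For reference, here is a minimal standalone sketch of the power-of-two rounding that zend_hash_check_size() performs, written against the portable bit-smearing fallback in the #else branch above. HT_MIN_SIZE is assumed to be 8 (as in the Zend headers) and the helper name is invented for the sketch; this is an illustration, not the engine's code.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_HT_MIN_SIZE 8u   /* assumed value of HT_MIN_SIZE */

static uint32_t sketch_round_up_pow2(uint32_t nSize)
{
    if (nSize <= SKETCH_HT_MIN_SIZE) {
        return SKETCH_HT_MIN_SIZE;
    }
    /* Smear the highest set bit of (nSize - 1) into all lower bits, then
     * add 1 -- the same result as 0x2u << (clz(nSize - 1) ^ 0x1f). */
    nSize -= 1;
    nSize |= nSize >> 1;
    nSize |= nSize >> 2;
    nSize |= nSize >> 4;
    nSize |= nSize >> 8;
    nSize |= nSize >> 16;
    return nSize + 1;
}

int main(void)
{
    /* prints: 8 16 1024 */
    printf("%u %u %u\n", sketch_round_up_pow2(3),
           sketch_round_up_pow2(9), sketch_round_up_pow2(1000));
    return 0;
}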
148 | | static zend_always_inline void zend_hash_real_init_packed_ex(HashTable *ht) |
149 | 86.3k | { |
150 | 86.3k | void *data; |
151 | | |
152 | 86.3k | if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) { |
153 | 144 | data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), 1); |
154 | 86.2k | } else if (EXPECTED(ht->nTableSize == HT_MIN_SIZE)) { |
155 | | /* Use specialized API with constant allocation amount for a particularly common case. */ |
156 | 86.2k | data = emalloc(HT_PACKED_SIZE_EX(HT_MIN_SIZE, HT_MIN_MASK)); |
157 | 86.2k | } else { |
158 | 0 | data = emalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK)); |
159 | 0 | } |
160 | 86.3k | HT_SET_DATA_ADDR(ht, data); |
161 | | /* Don't overwrite iterator count. */ |
162 | 86.3k | ht->u.v.flags = HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS; |
163 | 86.3k | HT_HASH_RESET_PACKED(ht); |
164 | 86.3k | } |
165 | | |
166 | | static zend_always_inline void zend_hash_real_init_mixed_ex(HashTable *ht) |
167 | 701k | { |
168 | 701k | void *data; |
169 | 701k | uint32_t nSize = ht->nTableSize; |
170 | | |
171 | 701k | ZEND_ASSERT(HT_SIZE_TO_MASK(nSize)); |
172 | | |
173 | 701k | if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) { |
174 | 596 | data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), 1); |
175 | 701k | } else if (EXPECTED(nSize == HT_MIN_SIZE)) { |
176 | 509k | data = emalloc(HT_SIZE_EX(HT_MIN_SIZE, HT_SIZE_TO_MASK(HT_MIN_SIZE))); |
177 | 509k | ht->nTableMask = HT_SIZE_TO_MASK(HT_MIN_SIZE); |
178 | 509k | HT_SET_DATA_ADDR(ht, data); |
179 | | /* Don't overwrite iterator count. */ |
180 | 509k | ht->u.v.flags = HASH_FLAG_STATIC_KEYS; |
181 | | #if defined(__AVX2__) |
182 | | do { |
183 | | __m256i ymm0 = _mm256_setzero_si256(); |
184 | | ymm0 = _mm256_cmpeq_epi64(ymm0, ymm0); |
185 | | _mm256_storeu_si256((__m256i*)&HT_HASH_EX(data, 0), ymm0); |
186 | | _mm256_storeu_si256((__m256i*)&HT_HASH_EX(data, 8), ymm0); |
187 | | } while(0); |
188 | | #elif defined (__SSE2__) |
189 | 509k | do { |
190 | 509k | __m128i xmm0 = _mm_setzero_si128(); |
191 | 509k | xmm0 = _mm_cmpeq_epi8(xmm0, xmm0); |
192 | 509k | _mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 0), xmm0); |
193 | 509k | _mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 4), xmm0); |
194 | 509k | _mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 8), xmm0); |
195 | 509k | _mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 12), xmm0); |
196 | 509k | } while (0); |
197 | | #elif defined(__aarch64__) || defined(_M_ARM64) |
198 | | do { |
199 | | int32x4_t t = vdupq_n_s32(-1); |
200 | | vst1q_s32((int32_t*)&HT_HASH_EX(data, 0), t); |
201 | | vst1q_s32((int32_t*)&HT_HASH_EX(data, 4), t); |
202 | | vst1q_s32((int32_t*)&HT_HASH_EX(data, 8), t); |
203 | | vst1q_s32((int32_t*)&HT_HASH_EX(data, 12), t); |
204 | | } while (0); |
205 | | #else |
206 | | HT_HASH_EX(data, 0) = -1; |
207 | | HT_HASH_EX(data, 1) = -1; |
208 | | HT_HASH_EX(data, 2) = -1; |
209 | | HT_HASH_EX(data, 3) = -1; |
210 | | HT_HASH_EX(data, 4) = -1; |
211 | | HT_HASH_EX(data, 5) = -1; |
212 | | HT_HASH_EX(data, 6) = -1; |
213 | | HT_HASH_EX(data, 7) = -1; |
214 | | HT_HASH_EX(data, 8) = -1; |
215 | | HT_HASH_EX(data, 9) = -1; |
216 | | HT_HASH_EX(data, 10) = -1; |
217 | | HT_HASH_EX(data, 11) = -1; |
218 | | HT_HASH_EX(data, 12) = -1; |
219 | | HT_HASH_EX(data, 13) = -1; |
220 | | HT_HASH_EX(data, 14) = -1; |
221 | | HT_HASH_EX(data, 15) = -1; |
222 | | #endif |
223 | 509k | return; |
224 | 509k | } else { |
225 | 192k | data = emalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize))); |
226 | 192k | } |
227 | 192k | ht->nTableMask = HT_SIZE_TO_MASK(nSize); |
228 | 192k | HT_SET_DATA_ADDR(ht, data); |
229 | 192k | HT_FLAGS(ht) = HASH_FLAG_STATIC_KEYS; |
230 | 192k | HT_HASH_RESET(ht); |
231 | 192k | } |
232 | | |
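The SIMD branches above only accelerate a simple job: every hash slot of the freshly allocated table is set to HT_INVALID_IDX (all bits set). A plain-C sketch of the same effect for the 16 slots of a minimum-size table follows; the names and the assumed value of HT_INVALID_IDX are part of the sketch, not taken from the headers.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define SKETCH_SLOTS 16                    /* hash slots of a minimum-size table */
#define SKETCH_INVALID_IDX ((uint32_t)-1)  /* assumed value of HT_INVALID_IDX */

static void sketch_reset_hash_slots(uint32_t *slots)
{
    /* 0xFF in every byte gives 0xFFFFFFFF in every 32-bit slot -- the same
     * all-ones pattern the AVX2/SSE2/NEON stores above write. */
    memset(slots, 0xFF, SKETCH_SLOTS * sizeof(uint32_t));
    assert(slots[0] == SKETCH_INVALID_IDX);
}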
233 | | static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, bool packed) |
234 | 895 | { |
235 | 895 | HT_ASSERT_RC1(ht); |
236 | 895 | ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED); |
237 | 895 | if (packed) { |
238 | 0 | zend_hash_real_init_packed_ex(ht); |
239 | 895 | } else { |
240 | 895 | zend_hash_real_init_mixed_ex(ht); |
241 | 895 | } |
242 | 895 | } |
243 | | |
244 | | static const uint32_t uninitialized_bucket[-HT_MIN_MASK] = |
245 | | {HT_INVALID_IDX, HT_INVALID_IDX}; |
246 | | |
247 | | ZEND_API const HashTable zend_empty_array = { |
248 | | .gc.refcount = 2, |
249 | | .gc.u.type_info = IS_ARRAY | (GC_IMMUTABLE << GC_FLAGS_SHIFT), |
250 | | .u.flags = HASH_FLAG_UNINITIALIZED, |
251 | | .nTableMask = HT_MIN_MASK, |
252 | | {.arData = (Bucket*)&uninitialized_bucket[2]}, |
253 | | .nNumUsed = 0, |
254 | | .nNumOfElements = 0, |
255 | | .nTableSize = HT_MIN_SIZE, |
256 | | .nInternalPointer = 0, |
257 | | .nNextFreeElement = ZEND_LONG_MIN, |
258 | | .pDestructor = ZVAL_PTR_DTOR |
259 | | }; |
260 | | |
261 | | static zend_always_inline void _zend_hash_init_int(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, bool persistent) |
262 | 1.83M | { |
263 | 1.83M | GC_SET_REFCOUNT(ht, 1); |
264 | 1.83M | GC_TYPE_INFO(ht) = GC_ARRAY | (persistent ? ((GC_PERSISTENT|GC_NOT_COLLECTABLE) << GC_FLAGS_SHIFT) : 0); |
265 | 1.83M | HT_FLAGS(ht) = HASH_FLAG_UNINITIALIZED; |
266 | 1.83M | ht->nTableMask = HT_MIN_MASK; |
267 | 1.83M | HT_SET_DATA_ADDR(ht, &uninitialized_bucket); |
268 | 1.83M | ht->nNumUsed = 0; |
269 | 1.83M | ht->nNumOfElements = 0; |
270 | 1.83M | ht->nInternalPointer = 0; |
271 | 1.83M | ht->nNextFreeElement = ZEND_LONG_MIN; |
272 | 1.83M | ht->pDestructor = pDestructor; |
273 | 1.83M | ht->nTableSize = zend_hash_check_size(nSize); |
274 | 1.83M | } |
275 | | |
276 | | ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, bool persistent) |
277 | 417k | { |
278 | 417k | _zend_hash_init_int(ht, nSize, pDestructor, persistent); |
279 | 417k | } |
280 | | |
281 | | ZEND_API HashTable* ZEND_FASTCALL _zend_new_array_0(void) |
282 | 0 | { |
283 | 0 | HashTable *ht = emalloc(sizeof(HashTable)); |
284 | 0 | _zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, false); |
285 | 0 | return ht; |
286 | 0 | } |
287 | | |
288 | | ZEND_API HashTable* ZEND_FASTCALL _zend_new_array(uint32_t nSize) |
289 | 1.41M | { |
290 | 1.41M | HashTable *ht = emalloc(sizeof(HashTable)); |
291 | 1.41M | _zend_hash_init_int(ht, nSize, ZVAL_PTR_DTOR, false); |
292 | 1.41M | return ht; |
293 | 1.41M | } |
294 | | |
295 | | ZEND_API HashTable* ZEND_FASTCALL zend_new_pair(const zval *val1, const zval *val2) |
296 | 0 | { |
297 | 0 | zval *zv; |
298 | 0 | HashTable *ht = emalloc(sizeof(HashTable)); |
299 | 0 | _zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, false); |
300 | 0 | ht->nNumUsed = ht->nNumOfElements = ht->nNextFreeElement = 2; |
301 | 0 | zend_hash_real_init_packed_ex(ht); |
302 | |
303 | 0 | zv = ht->arPacked; |
304 | 0 | ZVAL_COPY_VALUE(zv, val1); |
305 | 0 | zv++; |
306 | 0 | ZVAL_COPY_VALUE(zv, val2); |
307 | 0 | return ht; |
308 | 0 | } |
309 | | |
310 | | ZEND_API void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht) |
311 | 0 | { |
312 | 0 | HT_ASSERT_RC1(ht); |
313 | 0 | if (ht->nTableSize >= HT_MAX_SIZE) { |
314 | 0 | zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket)); |
315 | 0 | } |
316 | 0 | uint32_t newTableSize = ht->nTableSize * 2; |
317 | 0 | HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_PACKED_SIZE_EX(newTableSize, HT_MIN_MASK), HT_PACKED_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)); |
318 | 0 | ht->nTableSize = newTableSize; |
319 | 0 | } |
320 | | |
321 | | ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, bool packed) |
322 | 895 | { |
323 | 895 | IS_CONSISTENT(ht); |
324 | | |
325 | 895 | HT_ASSERT_RC1(ht); |
326 | 895 | zend_hash_real_init_ex(ht, packed); |
327 | 895 | } |
328 | | |
329 | | ZEND_API void ZEND_FASTCALL zend_hash_real_init_packed(HashTable *ht) |
330 | 40.5k | { |
331 | 40.5k | IS_CONSISTENT(ht); |
332 | | |
333 | 40.5k | HT_ASSERT_RC1(ht); |
334 | 40.5k | zend_hash_real_init_packed_ex(ht); |
335 | 40.5k | } |
336 | | |
337 | | ZEND_API void ZEND_FASTCALL zend_hash_real_init_mixed(HashTable *ht) |
338 | 700k | { |
339 | 700k | IS_CONSISTENT(ht); |
340 | | |
341 | 700k | HT_ASSERT_RC1(ht); |
342 | 700k | zend_hash_real_init_mixed_ex(ht); |
343 | 700k | } |
344 | | |
345 | | ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht) |
346 | 3.10k | { |
347 | 3.10k | void *new_data, *old_data = HT_GET_DATA_ADDR(ht); |
348 | 3.10k | zval *src = ht->arPacked; |
349 | 3.10k | Bucket *dst; |
350 | 3.10k | uint32_t i; |
351 | 3.10k | uint32_t nSize = ht->nTableSize; |
352 | | |
353 | 3.10k | ZEND_ASSERT(HT_SIZE_TO_MASK(nSize)); |
354 | | |
355 | 3.10k | HT_ASSERT_RC1(ht); |
356 | | // Alloc before assign to avoid inconsistencies on OOM |
357 | 3.10k | new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT); |
358 | 3.10k | HT_FLAGS(ht) &= ~HASH_FLAG_PACKED; |
359 | 3.10k | ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize); |
360 | 3.10k | HT_SET_DATA_ADDR(ht, new_data); |
361 | 3.10k | dst = ht->arData; |
362 | 13.6k | for (i = 0; i < ht->nNumUsed; i++) { |
363 | 10.5k | ZVAL_COPY_VALUE(&dst->val, src); |
364 | 10.5k | dst->h = i; |
365 | 10.5k | dst->key = NULL; |
366 | 10.5k | dst++; |
367 | 10.5k | src++; |
368 | 10.5k | } |
369 | 3.10k | pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT); |
370 | 3.10k | zend_hash_rehash(ht); |
371 | 3.10k | } |
372 | | |
373 | | ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht) |
374 | 0 | { |
375 | 0 | void *new_data, *old_data = HT_GET_DATA_ADDR(ht); |
376 | 0 | Bucket *src = ht->arData; |
377 | 0 | zval *dst; |
378 | 0 | uint32_t i; |
379 | |
380 | 0 | HT_ASSERT_RC1(ht); |
381 | 0 | new_data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT); |
382 | 0 | HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS; |
383 | 0 | ht->nTableMask = HT_MIN_MASK; |
384 | 0 | HT_SET_DATA_ADDR(ht, new_data); |
385 | 0 | HT_HASH_RESET_PACKED(ht); |
386 | 0 | dst = ht->arPacked; |
387 | 0 | for (i = 0; i < ht->nNumUsed; i++) { |
388 | 0 | ZVAL_COPY_VALUE(dst, &src->val); |
389 | 0 | dst++; |
390 | 0 | src++; |
391 | 0 | } |
392 | 0 | pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT); |
393 | 0 | } |
394 | | |
395 | | ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, bool packed) |
396 | 134k | { |
397 | 134k | HT_ASSERT_RC1(ht); |
398 | | |
399 | 134k | if (nSize == 0) return; |
400 | | |
401 | 133k | ZEND_ASSERT(HT_SIZE_TO_MASK(nSize)); |
402 | | |
403 | 133k | if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) { |
404 | 895 | if (nSize > ht->nTableSize) { |
405 | 123 | ht->nTableSize = zend_hash_check_size(nSize); |
406 | 123 | } |
407 | 895 | zend_hash_real_init(ht, packed); |
408 | 133k | } else { |
409 | 133k | if (packed) { |
410 | 0 | ZEND_ASSERT(HT_IS_PACKED(ht)); |
411 | 0 | if (nSize > ht->nTableSize) { |
412 | 0 | uint32_t newTableSize = zend_hash_check_size(nSize); |
413 | 0 | HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_PACKED_SIZE_EX(newTableSize, HT_MIN_MASK), HT_PACKED_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)); |
414 | 0 | ht->nTableSize = newTableSize; |
415 | 0 | } |
416 | 133k | } else { |
417 | 133k | ZEND_ASSERT(!HT_IS_PACKED(ht)); |
418 | 133k | if (nSize > ht->nTableSize) { |
419 | 103k | void *new_data, *old_data = HT_GET_DATA_ADDR(ht); |
420 | 103k | Bucket *old_buckets = ht->arData; |
421 | 103k | nSize = zend_hash_check_size(nSize); |
422 | 103k | new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT); |
423 | 103k | ht->nTableSize = nSize; |
424 | 103k | ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize); |
425 | 103k | HT_SET_DATA_ADDR(ht, new_data); |
426 | 103k | memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed); |
427 | 103k | pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT); |
428 | 103k | zend_hash_rehash(ht); |
429 | 103k | } |
430 | 133k | } |
431 | 133k | } |
432 | 133k | } |
433 | | |
434 | | ZEND_API void ZEND_FASTCALL zend_hash_discard(HashTable *ht, uint32_t nNumUsed) |
435 | 0 | { |
436 | 0 | Bucket *p, *end, *arData; |
437 | 0 | uint32_t nIndex; |
438 | |
439 | 0 | ZEND_ASSERT(!HT_IS_PACKED(ht)); |
440 | 0 | arData = ht->arData; |
441 | 0 | p = arData + ht->nNumUsed; |
442 | 0 | end = arData + nNumUsed; |
443 | 0 | ht->nNumUsed = nNumUsed; |
444 | 0 | while (p != end) { |
445 | 0 | p--; |
446 | 0 | if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; |
447 | 0 | ht->nNumOfElements--; |
448 | | /* Collision pointers are always directed from higher to lower buckets */ |
449 | | #if 0 |
450 | | if (!(Z_NEXT(p->val) == HT_INVALID_IDX || HT_HASH_TO_BUCKET_EX(arData, Z_NEXT(p->val)) < p)) { |
451 | | abort(); |
452 | | } |
453 | | #endif |
454 | 0 | nIndex = p->h | ht->nTableMask; |
455 | 0 | HT_HASH_EX(arData, nIndex) = Z_NEXT(p->val); |
456 | 0 | } |
457 | 0 | } |
458 | | |
459 | | static uint32_t zend_array_recalc_elements(const HashTable *ht) |
460 | 0 | { |
461 | 0 | zval *val; |
462 | 0 | uint32_t num = ht->nNumOfElements; |
463 | |
464 | 0 | ZEND_HASH_MAP_FOREACH_VAL(ht, val) { |
465 | 0 | if (Z_TYPE_P(val) == IS_INDIRECT) { |
466 | 0 | if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) { |
467 | 0 | num--; |
468 | 0 | } |
469 | 0 | } |
470 | 0 | } ZEND_HASH_FOREACH_END(); |
471 | 0 | return num; |
472 | 0 | } |
473 | | /* }}} */ |
474 | | |
475 | | ZEND_API uint32_t zend_array_count(HashTable *ht) |
476 | 0 | { |
477 | 0 | uint32_t num; |
478 | 0 | if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_HAS_EMPTY_IND)) { |
479 | 0 | num = zend_array_recalc_elements(ht); |
480 | 0 | if (UNEXPECTED(ht->nNumOfElements == num)) { |
481 | 0 | HT_FLAGS(ht) &= ~HASH_FLAG_HAS_EMPTY_IND; |
482 | 0 | } |
483 | 0 | } else if (UNEXPECTED(ht == &EG(symbol_table))) { |
484 | 0 | num = zend_array_recalc_elements(ht); |
485 | 0 | } else { |
486 | 0 | num = zend_hash_num_elements(ht); |
487 | 0 | } |
488 | 0 | return num; |
489 | 0 | } |
490 | | /* }}} */ |
491 | | |
492 | | static zend_always_inline HashPosition _zend_hash_get_valid_pos(const HashTable *ht, HashPosition pos) |
493 | 0 | { |
494 | 0 | if (HT_IS_PACKED(ht)) { |
495 | 0 | while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arPacked[pos])) { |
496 | 0 | pos++; |
497 | 0 | } |
498 | 0 | } else { |
499 | 0 | while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arData[pos].val)) { |
500 | 0 | pos++; |
501 | 0 | } |
502 | 0 | } |
503 | 0 | return pos; |
504 | 0 | } |
505 | | |
506 | | static zend_always_inline HashPosition _zend_hash_get_current_pos(const HashTable *ht) |
507 | 0 | { |
508 | 0 | return _zend_hash_get_valid_pos(ht, ht->nInternalPointer); |
509 | 0 | } |
510 | | |
511 | | ZEND_API HashPosition ZEND_FASTCALL zend_hash_get_current_pos(const HashTable *ht) |
512 | 0 | { |
513 | 0 | return _zend_hash_get_current_pos(ht); |
514 | 0 | } |
515 | | |
516 | | ZEND_API HashPosition ZEND_FASTCALL zend_hash_get_current_pos_ex(const HashTable *ht, HashPosition pos) |
517 | 0 | { |
518 | 0 | return _zend_hash_get_valid_pos(ht, pos); |
519 | 0 | } |
520 | | |
521 | 0 | static void zend_hash_remove_iterator_copies(uint32_t idx) { |
522 | 0 | HashTableIterator *iterators = EG(ht_iterators); |
523 | |
524 | 0 | HashTableIterator *iter = iterators + idx; |
525 | 0 | uint32_t next_idx = iter->next_copy; |
526 | 0 | while (next_idx != idx) { |
527 | 0 | uint32_t cur_idx = next_idx; |
528 | 0 | HashTableIterator *cur_iter = iterators + cur_idx; |
529 | 0 | next_idx = cur_iter->next_copy; |
530 | 0 | cur_iter->next_copy = cur_idx; // avoid recursion in zend_hash_iterator_del |
531 | 0 | zend_hash_iterator_del(cur_idx); |
532 | 0 | } |
533 | 0 | iter->next_copy = idx; |
534 | 0 | } |
535 | | |
536 | | ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos) |
537 | 0 | { |
538 | 0 | HashTableIterator *iter = EG(ht_iterators); |
539 | 0 | HashTableIterator *end = iter + EG(ht_iterators_count); |
540 | 0 | uint32_t idx; |
541 | |
542 | 0 | if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) { |
543 | 0 | HT_INC_ITERATORS_COUNT(ht); |
544 | 0 | } |
545 | 0 | while (iter != end) { |
546 | 0 | if (iter->ht == NULL) { |
547 | 0 | iter->ht = ht; |
548 | 0 | iter->pos = pos; |
549 | 0 | idx = iter - EG(ht_iterators); |
550 | 0 | iter->next_copy = idx; |
551 | 0 | if (idx + 1 > EG(ht_iterators_used)) { |
552 | 0 | EG(ht_iterators_used) = idx + 1; |
553 | 0 | } |
554 | 0 | return idx; |
555 | 0 | } |
556 | 0 | iter++; |
557 | 0 | } |
558 | 0 | if (EG(ht_iterators) == EG(ht_iterators_slots)) { |
559 | 0 | EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8)); |
560 | 0 | memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count)); |
561 | 0 | } else { |
562 | 0 | EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8)); |
563 | 0 | } |
564 | 0 | iter = EG(ht_iterators) + EG(ht_iterators_count); |
565 | 0 | EG(ht_iterators_count) += 8; |
566 | 0 | iter->ht = ht; |
567 | 0 | iter->pos = pos; |
568 | 0 | memset(iter + 1, 0, sizeof(HashTableIterator) * 7); |
569 | 0 | idx = iter - EG(ht_iterators); |
570 | 0 | iter->next_copy = idx; |
571 | 0 | EG(ht_iterators_used) = idx + 1; |
572 | 0 | return idx; |
573 | 0 | } |
574 | | |
575 | | // To avoid losing track of the HashTable when separating arrays, we track all copies at once. |
576 | 0 | static zend_always_inline bool zend_hash_iterator_find_copy_pos(uint32_t idx, HashTable *ht) { |
577 | 0 | HashTableIterator *iter = EG(ht_iterators) + idx; |
578 | |
579 | 0 | uint32_t next_idx = iter->next_copy; |
580 | 0 | if (EXPECTED(next_idx != idx)) { |
581 | 0 | HashTableIterator *copy_iter; |
582 | 0 | while (next_idx != idx) { |
583 | 0 | copy_iter = EG(ht_iterators) + next_idx; |
584 | 0 | if (copy_iter->ht == ht) { |
585 | | // We have found the hashtable we are actually iterating over |
586 | | // Now clean up any intermediate copies and replace the original index with the found one |
587 | 0 | if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR) |
588 | 0 | && EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) { |
589 | 0 | HT_DEC_ITERATORS_COUNT(iter->ht); |
590 | 0 | } |
591 | 0 | if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) { |
592 | 0 | HT_INC_ITERATORS_COUNT(ht); |
593 | 0 | } |
594 | 0 | iter->ht = copy_iter->ht; |
595 | 0 | iter->pos = copy_iter->pos; |
596 | 0 | zend_hash_remove_iterator_copies(idx); |
597 | 0 | return true; |
598 | 0 | } |
599 | 0 | next_idx = copy_iter->next_copy; |
600 | 0 | } |
601 | 0 | zend_hash_remove_iterator_copies(idx); |
602 | 0 | } |
603 | | |
604 | 0 | return false; |
605 | 0 | } |
606 | | |
607 | | ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht) |
608 | 0 | { |
609 | 0 | HashTableIterator *iter = EG(ht_iterators) + idx; |
610 | |
611 | 0 | ZEND_ASSERT(idx != (uint32_t)-1); |
612 | 0 | if (UNEXPECTED(iter->ht != ht) && !zend_hash_iterator_find_copy_pos(idx, ht)) { |
613 | 0 | if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR) |
614 | 0 | && EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) { |
615 | 0 | HT_DEC_ITERATORS_COUNT(iter->ht); |
616 | 0 | } |
617 | 0 | if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) { |
618 | 0 | HT_INC_ITERATORS_COUNT(ht); |
619 | 0 | } |
620 | 0 | iter->ht = ht; |
621 | 0 | iter->pos = _zend_hash_get_current_pos(ht); |
622 | 0 | } |
623 | 0 | return iter->pos; |
624 | 0 | } |
625 | | |
626 | | ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array) |
627 | 0 | { |
628 | 0 | HashTable *ht = Z_ARRVAL_P(array); |
629 | 0 | HashTableIterator *iter = EG(ht_iterators) + idx; |
630 | |
631 | 0 | ZEND_ASSERT(idx != (uint32_t)-1); |
632 | 0 | if (UNEXPECTED(iter->ht != ht) && !zend_hash_iterator_find_copy_pos(idx, ht)) { |
633 | 0 | if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR) |
634 | 0 | && EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) { |
635 | 0 | HT_DEC_ITERATORS_COUNT(iter->ht); |
636 | 0 | } |
637 | | |
638 | | /* Inlined SEPARATE_ARRAY() with updating of iterator when EG(ht_iterators) grows. */ |
639 | 0 | if (UNEXPECTED(GC_REFCOUNT(ht) > 1)) { |
640 | 0 | ZVAL_ARR(array, zend_array_dup(ht)); |
641 | 0 | GC_TRY_DELREF(ht); |
642 | 0 | iter = EG(ht_iterators) + idx; |
643 | 0 | ht = Z_ARRVAL_P(array); |
644 | 0 | } |
645 | |
646 | 0 | if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) { |
647 | 0 | HT_INC_ITERATORS_COUNT(ht); |
648 | 0 | } |
649 | 0 | iter->ht = ht; |
650 | 0 | iter->pos = _zend_hash_get_current_pos(ht); |
651 | 0 | } |
652 | 0 | return iter->pos; |
653 | 0 | } |
654 | | |
655 | | ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx) |
656 | 0 | { |
657 | 0 | HashTableIterator *iter = EG(ht_iterators) + idx; |
658 | |
659 | 0 | ZEND_ASSERT(idx != (uint32_t)-1); |
660 | |
661 | 0 | if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR) |
662 | 0 | && EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) { |
663 | 0 | ZEND_ASSERT(HT_ITERATORS_COUNT(iter->ht) != 0); |
664 | 0 | HT_DEC_ITERATORS_COUNT(iter->ht); |
665 | 0 | } |
666 | 0 | iter->ht = NULL; |
667 | |
668 | 0 | if (UNEXPECTED(iter->next_copy != idx)) { |
669 | 0 | zend_hash_remove_iterator_copies(idx); |
670 | 0 | } |
671 | |
672 | 0 | if (idx == EG(ht_iterators_used) - 1) { |
673 | 0 | while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) { |
674 | 0 | idx--; |
675 | 0 | } |
676 | 0 | EG(ht_iterators_used) = idx; |
677 | 0 | } |
678 | 0 | } |
679 | | |
680 | | static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(const HashTable *ht) |
681 | 0 | { |
682 | 0 | HashTableIterator *iter = EG(ht_iterators); |
683 | 0 | const HashTableIterator *end = iter + EG(ht_iterators_used); |
684 | |
685 | 0 | while (iter != end) { |
686 | 0 | if (iter->ht == ht) { |
687 | 0 | iter->ht = HT_POISONED_PTR; |
688 | 0 | } |
689 | 0 | iter++; |
690 | 0 | } |
691 | 0 | } |
692 | | |
693 | | static zend_always_inline void zend_hash_iterators_remove(const HashTable *ht) |
694 | 1.42M | { |
695 | 1.42M | if (UNEXPECTED(HT_HAS_ITERATORS(ht))) { |
696 | 0 | _zend_hash_iterators_remove(ht); |
697 | 0 | } |
698 | 1.42M | } |
699 | | |
700 | | ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(const HashTable *ht, HashPosition start) |
701 | 0 | { |
702 | 0 | const HashTableIterator *iter = EG(ht_iterators); |
703 | 0 | const HashTableIterator *end = iter + EG(ht_iterators_used); |
704 | 0 | HashPosition res = ht->nNumUsed; |
705 | |
706 | 0 | while (iter != end) { |
707 | 0 | if (iter->ht == ht) { |
708 | 0 | if (iter->pos >= start && iter->pos < res) { |
709 | 0 | res = iter->pos; |
710 | 0 | } |
711 | 0 | } |
712 | 0 | iter++; |
713 | 0 | } |
714 | 0 | return res; |
715 | 0 | } |
716 | | |
717 | | ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(const HashTable *ht, HashPosition from, HashPosition to) |
718 | 0 | { |
719 | 0 | HashTableIterator *iter = EG(ht_iterators); |
720 | 0 | const HashTableIterator *end = iter + EG(ht_iterators_used); |
721 | |
722 | 0 | while (iter != end) { |
723 | 0 | if (iter->ht == ht && iter->pos == from) { |
724 | 0 | iter->pos = to; |
725 | 0 | } |
726 | 0 | iter++; |
727 | 0 | } |
728 | 0 | } |
729 | | |
730 | | ZEND_API void ZEND_FASTCALL zend_hash_iterators_advance(const HashTable *ht, HashPosition step) |
731 | 0 | { |
732 | 0 | HashTableIterator *iter = EG(ht_iterators); |
733 | 0 | const HashTableIterator *end = iter + EG(ht_iterators_used); |
734 | |
735 | 0 | while (iter != end) { |
736 | 0 | if (iter->ht == ht) { |
737 | 0 | iter->pos += step; |
738 | 0 | } |
739 | 0 | iter++; |
740 | 0 | } |
741 | 0 | } |
742 | | |
743 | | /* Hash must be known and precomputed beforehand */ |
744 | | static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, const zend_string *key) |
745 | 2.68M | { |
746 | 2.68M | uint32_t nIndex; |
747 | 2.68M | uint32_t idx; |
748 | 2.68M | Bucket *p, *arData; |
749 | | |
750 | 2.68M | ZEND_ASSERT(ZSTR_H(key) != 0 && "Hash must be known"); |
751 | | |
752 | 2.68M | arData = ht->arData; |
753 | 2.68M | nIndex = ZSTR_H(key) | ht->nTableMask; |
754 | 2.68M | idx = HT_HASH_EX(arData, nIndex); |
755 | | |
756 | 2.68M | if (UNEXPECTED(idx == HT_INVALID_IDX)) { |
757 | 1.04M | return NULL; |
758 | 1.04M | } |
759 | 1.64M | p = HT_HASH_TO_BUCKET_EX(arData, idx); |
760 | 1.64M | if (EXPECTED(p->key == key)) { /* check for the same interned string */ |
761 | 687k | return p; |
762 | 687k | } |
763 | | |
764 | 962k | while (1) { |
765 | 962k | if (p->h == ZSTR_H(key) && |
766 | 621k | EXPECTED(p->key) && |
767 | 621k | zend_string_equal_content(p->key, key)) { |
768 | 621k | return p; |
769 | 621k | } |
770 | 340k | idx = Z_NEXT(p->val); |
771 | 340k | if (idx == HT_INVALID_IDX) { |
772 | 319k | return NULL; |
773 | 319k | } |
774 | 21.1k | p = HT_HASH_TO_BUCKET_EX(arData, idx); |
775 | 21.1k | if (p->key == key) { /* check for the same interned string */ |
776 | 11.8k | return p; |
777 | 11.8k | } |
778 | 21.1k | } |
779 | 953k | } |
780 | | |
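A simplified, self-contained model of the string lookup above may help: the head of each collision chain is read from a hash slot selected by the key's hash, and the chain is walked through a per-bucket next index. The real table selects the slot with h | nTableMask (a negative mask, because the slots live before arData); the sketch below uses an ordinary positive-indexed array and h & (size - 1), which maps keys the same way. All names here are invented for the sketch.

#include <stdint.h>
#include <string.h>

#define SK_INVALID ((uint32_t)-1)

typedef struct {
    uint64_t    h;    /* precomputed hash of the key           */
    const char *key;  /* NULL for integer keys                 */
    uint32_t    next; /* index of the next bucket in the chain */
} sk_bucket;

typedef struct {
    sk_bucket *buckets; /* dense bucket array (arData-like) */
    uint32_t  *hash;    /* chain heads, one per hash slot   */
    uint32_t   size;    /* table size, a power of two       */
} sk_table;

static sk_bucket *sk_find(const sk_table *t, uint64_t h, const char *key)
{
    uint32_t idx = t->hash[h & (t->size - 1)];
    while (idx != SK_INVALID) {
        sk_bucket *p = &t->buckets[idx];
        /* Compare the cached hash first, then the key itself. */
        if (p->h == h && p->key != NULL && strcmp(p->key, key) == 0) {
            return p;
        }
        idx = p->next; /* follow the collision chain */
    }
    return NULL;
}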
781 | | static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h) |
782 | 2.97M | { |
783 | 2.97M | uint32_t nIndex; |
784 | 2.97M | uint32_t idx; |
785 | 2.97M | Bucket *p, *arData; |
786 | | |
787 | 2.97M | arData = ht->arData; |
788 | 2.97M | nIndex = h | ht->nTableMask; |
789 | 2.97M | idx = HT_HASH_EX(arData, nIndex); |
790 | 3.00M | while (idx != HT_INVALID_IDX) { |
791 | 443k | ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize)); |
792 | 443k | p = HT_HASH_TO_BUCKET_EX(arData, idx); |
793 | 443k | if ((p->h == h) |
794 | 418k | && p->key |
795 | 418k | && zend_string_equals_cstr(p->key, str, len)) { |
796 | 418k | return p; |
797 | 418k | } |
798 | 24.8k | idx = Z_NEXT(p->val); |
799 | 24.8k | } |
800 | 2.56M | return NULL; |
801 | 2.97M | } |
802 | | |
803 | | static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h) |
804 | 960k | { |
805 | 960k | uint32_t nIndex; |
806 | 960k | uint32_t idx; |
807 | 960k | Bucket *p, *arData; |
808 | | |
809 | 960k | arData = ht->arData; |
810 | 960k | nIndex = h | ht->nTableMask; |
811 | 960k | idx = HT_HASH_EX(arData, nIndex); |
812 | 1.45M | while (idx != HT_INVALID_IDX) { |
813 | 660k | ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize)); |
814 | 660k | p = HT_HASH_TO_BUCKET_EX(arData, idx); |
815 | 660k | if (p->h == h && !p->key) { |
816 | 162k | return p; |
817 | 162k | } |
818 | 497k | idx = Z_NEXT(p->val); |
819 | 497k | } |
820 | 798k | return NULL; |
821 | 960k | } |
822 | | |
823 | | static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag) |
824 | 877k | { |
825 | 877k | zend_ulong h; |
826 | 877k | uint32_t nIndex; |
827 | 877k | uint32_t idx; |
828 | 877k | Bucket *p, *arData; |
829 | | |
830 | 877k | IS_CONSISTENT(ht); |
831 | 877k | HT_ASSERT_RC1(ht); |
832 | 877k | zend_string_hash_val(key); |
833 | | |
834 | 877k | if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) { |
835 | 69.2k | if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) { |
836 | 69.2k | zend_hash_real_init_mixed(ht); |
837 | 69.2k | goto add_to_hash; |
838 | 69.2k | } else { |
839 | 0 | zend_hash_packed_to_hash(ht); |
840 | 0 | } |
841 | 808k | } else if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) { |
842 | 808k | p = zend_hash_find_bucket(ht, key); |
843 | | |
844 | 808k | if (p) { |
845 | 127k | zval *data; |
846 | | |
847 | 127k | ZEND_ASSERT((flag & HASH_ADD_NEW) == 0); |
848 | 127k | if (flag & HASH_LOOKUP) { |
849 | 127k | return &p->val; |
850 | 127k | } else if (flag & HASH_ADD) { |
851 | 0 | if (!(flag & HASH_UPDATE_INDIRECT)) { |
852 | 0 | return NULL; |
853 | 0 | } |
854 | 0 | ZEND_ASSERT(&p->val != pData); |
855 | 0 | data = &p->val; |
856 | 0 | if (Z_TYPE_P(data) == IS_INDIRECT) { |
857 | 0 | data = Z_INDIRECT_P(data); |
858 | 0 | if (Z_TYPE_P(data) != IS_UNDEF) { |
859 | 0 | return NULL; |
860 | 0 | } |
861 | 0 | } else { |
862 | 0 | return NULL; |
863 | 0 | } |
864 | 0 | } else { |
865 | 0 | ZEND_ASSERT(&p->val != pData); |
866 | 0 | data = &p->val; |
867 | 0 | if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) { |
868 | 0 | data = Z_INDIRECT_P(data); |
869 | 0 | } |
870 | 0 | } |
871 | 0 | if (ht->pDestructor) { |
872 | 0 | ht->pDestructor(data); |
873 | 0 | } |
874 | 0 | ZVAL_COPY_VALUE(data, pData); |
875 | 0 | return data; |
876 | 127k | } |
877 | 808k | } |
878 | | |
879 | 681k | ZEND_HASH_IF_FULL_DO_RESIZE(ht); /* If the Hash table is full, resize it */ |
880 | | |
881 | 750k | add_to_hash: |
882 | 750k | if (!ZSTR_IS_INTERNED(key)) { |
883 | 399k | zend_string_addref(key); |
884 | 399k | HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS; |
885 | 399k | } |
886 | 750k | idx = ht->nNumUsed++; |
887 | 750k | ht->nNumOfElements++; |
888 | 750k | arData = ht->arData; |
889 | 750k | p = arData + idx; |
890 | 750k | p->key = key; |
891 | 750k | p->h = h = ZSTR_H(key); |
892 | 750k | nIndex = h | ht->nTableMask; |
893 | 750k | Z_NEXT(p->val) = HT_HASH_EX(arData, nIndex); |
894 | 750k | HT_HASH_EX(arData, nIndex) = HT_IDX_TO_HASH(idx); |
895 | 750k | if (flag & HASH_LOOKUP) { |
896 | 207k | ZVAL_NULL(&p->val); |
897 | 543k | } else { |
898 | 543k | ZVAL_COPY_VALUE(&p->val, pData); |
899 | 543k | } |
900 | | |
901 | 750k | return &p->val; |
902 | 681k | } |
903 | | |
904 | | static zend_always_inline zval *_zend_hash_str_add_or_update_i(HashTable *ht, const char *str, size_t len, zend_ulong h, zval *pData, uint32_t flag) |
905 | 90.6k | { |
906 | 90.6k | zend_string *key; |
907 | 90.6k | uint32_t nIndex; |
908 | 90.6k | uint32_t idx; |
909 | 90.6k | Bucket *p; |
910 | | |
911 | 90.6k | IS_CONSISTENT(ht); |
912 | 90.6k | HT_ASSERT_RC1(ht); |
913 | | |
914 | 90.6k | if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) { |
915 | 90.6k | if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) { |
916 | 90.6k | zend_hash_real_init_mixed(ht); |
917 | 90.6k | goto add_to_hash; |
918 | 90.6k | } else { |
919 | 0 | zend_hash_packed_to_hash(ht); |
920 | 0 | } |
921 | 90.6k | } else if ((flag & HASH_ADD_NEW) == 0) { |
922 | 10 | p = zend_hash_str_find_bucket(ht, str, len, h); |
923 | | |
924 | 10 | if (p) { |
925 | 0 | zval *data; |
926 | |
927 | 0 | if (flag & HASH_LOOKUP) { |
928 | 0 | return &p->val; |
929 | 0 | } else if (flag & HASH_ADD) { |
930 | 0 | if (!(flag & HASH_UPDATE_INDIRECT)) { |
931 | 0 | return NULL; |
932 | 0 | } |
933 | 0 | ZEND_ASSERT(&p->val != pData); |
934 | 0 | data = &p->val; |
935 | 0 | if (Z_TYPE_P(data) == IS_INDIRECT) { |
936 | 0 | data = Z_INDIRECT_P(data); |
937 | 0 | if (Z_TYPE_P(data) != IS_UNDEF) { |
938 | 0 | return NULL; |
939 | 0 | } |
940 | 0 | } else { |
941 | 0 | return NULL; |
942 | 0 | } |
943 | 0 | } else { |
944 | 0 | ZEND_ASSERT(&p->val != pData); |
945 | 0 | data = &p->val; |
946 | 0 | if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) { |
947 | 0 | data = Z_INDIRECT_P(data); |
948 | 0 | } |
949 | 0 | } |
950 | 0 | if (ht->pDestructor) { |
951 | 0 | ht->pDestructor(data); |
952 | 0 | } |
953 | 0 | ZVAL_COPY_VALUE(data, pData); |
954 | 0 | return data; |
955 | 0 | } |
956 | 10 | } |
957 | | |
958 | 10 | ZEND_HASH_IF_FULL_DO_RESIZE(ht); /* If the Hash table is full, resize it */ |
959 | | |
960 | 90.6k | add_to_hash: |
961 | 90.6k | idx = ht->nNumUsed++; |
962 | 90.6k | ht->nNumOfElements++; |
963 | 90.6k | p = ht->arData + idx; |
964 | 90.6k | p->key = key = zend_string_init(str, len, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT); |
965 | | #if ZEND_RC_DEBUG |
966 | | if (GC_FLAGS(ht) & GC_PERSISTENT_LOCAL) { |
967 | | GC_MAKE_PERSISTENT_LOCAL(key); |
968 | | } |
969 | | #endif |
970 | 90.6k | p->h = ZSTR_H(key) = h; |
971 | 90.6k | HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS; |
972 | 90.6k | if (flag & HASH_LOOKUP) { |
973 | 0 | ZVAL_NULL(&p->val); |
974 | 90.6k | } else { |
975 | 90.6k | ZVAL_COPY_VALUE(&p->val, pData); |
976 | 90.6k | } |
977 | 90.6k | nIndex = h | ht->nTableMask; |
978 | 90.6k | Z_NEXT(p->val) = HT_HASH(ht, nIndex); |
979 | 90.6k | HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx); |
980 | | |
981 | 90.6k | return &p->val; |
982 | 10 | } |
983 | | |
984 | | ZEND_API zval* ZEND_FASTCALL zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag) |
985 | 0 | { |
986 | 0 | if (flag == HASH_ADD) { |
987 | 0 | return zend_hash_add(ht, key, pData); |
988 | 0 | } else if (flag == HASH_ADD_NEW) { |
989 | 0 | return zend_hash_add_new(ht, key, pData); |
990 | 0 | } else if (flag == HASH_UPDATE) { |
991 | 0 | return zend_hash_update(ht, key, pData); |
992 | 0 | } else { |
993 | 0 | ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT)); |
994 | 0 | return zend_hash_update_ind(ht, key, pData); |
995 | 0 | } |
996 | 0 | } |
997 | | |
998 | | ZEND_API zval* ZEND_FASTCALL zend_hash_add(HashTable *ht, zend_string *key, zval *pData) |
999 | 101k | { |
1000 | 101k | return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD); |
1001 | 101k | } |
1002 | | |
1003 | | ZEND_API zval* ZEND_FASTCALL zend_hash_update(HashTable *ht, zend_string *key, zval *pData) |
1004 | 196k | { |
1005 | 196k | return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE); |
1006 | 196k | } |
1007 | | |
1008 | | ZEND_API zval* ZEND_FASTCALL zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData) |
1009 | 0 | { |
1010 | 0 | return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT); |
1011 | 0 | } |
1012 | | |
1013 | | ZEND_API zval* ZEND_FASTCALL zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData) |
1014 | 245k | { |
1015 | 245k | return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW); |
1016 | 245k | } |
1017 | | |
1018 | | ZEND_API zval* ZEND_FASTCALL zend_hash_lookup(HashTable *ht, zend_string *key) |
1019 | 334k | { |
1020 | 334k | return _zend_hash_add_or_update_i(ht, key, NULL, HASH_LOOKUP); |
1021 | 334k | } |
1022 | | |
1023 | | ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag) |
1024 | 0 | { |
1025 | 0 | if (flag == HASH_ADD) { |
1026 | 0 | return zend_hash_str_add(ht, str, len, pData); |
1027 | 0 | } else if (flag == HASH_ADD_NEW) { |
1028 | 0 | return zend_hash_str_add_new(ht, str, len, pData); |
1029 | 0 | } else if (flag == HASH_UPDATE) { |
1030 | 0 | return zend_hash_str_update(ht, str, len, pData); |
1031 | 0 | } else { |
1032 | 0 | ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT)); |
1033 | 0 | return zend_hash_str_update_ind(ht, str, len, pData); |
1034 | 0 | } |
1035 | 0 | } |
1036 | | |
1037 | | ZEND_API zval* ZEND_FASTCALL zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData) |
1038 | 90.3k | { |
1039 | 90.3k | zend_ulong h = zend_hash_func(str, len); |
1040 | | |
1041 | 90.3k | return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE); |
1042 | 90.3k | } |
1043 | | |
1044 | | ZEND_API zval* ZEND_FASTCALL zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData) |
1045 | 0 | { |
1046 | 0 | zend_ulong h = zend_hash_func(str, len); |
1047 | |
1048 | 0 | return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT); |
1049 | 0 | } |
1050 | | |
1051 | | ZEND_API zval* ZEND_FASTCALL zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData) |
1052 | 305 | { |
1053 | 305 | zend_ulong h = zend_hash_func(str, len); |
1054 | | |
1055 | 305 | return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD); |
1056 | 305 | } |
1057 | | |
1058 | | ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData) |
1059 | 0 | { |
1060 | 0 | zend_ulong h = zend_hash_func(str, len); |
1061 | |
1062 | 0 | return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD_NEW); |
1063 | 0 | } |
1064 | | |
1065 | | ZEND_API zval* ZEND_FASTCALL zend_hash_str_lookup(HashTable *ht, const char *str, size_t len) |
1066 | 0 | { |
1067 | 0 | zend_ulong h = zend_hash_func(str, len); |
1068 | |
1069 | 0 | return _zend_hash_str_add_or_update_i(ht, str, len, h, NULL, HASH_LOOKUP); |
1070 | 0 | } |
1071 | | |
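For context, a hypothetical extension-side caller of the insert/update entry points defined in this file might look like the sketch below. It assumes a PHP source tree (zend.h on the include path) and an already-initialized, non-persistent HashTable; it is an illustration, not code from this file.

#include "zend.h"

static void sketch_fill(HashTable *ht)
{
    zval tmp;

    /* Insert or overwrite by string key; the length excludes the NUL. */
    ZVAL_LONG(&tmp, 42);
    zend_hash_str_update(ht, "answer", sizeof("answer") - 1, &tmp);

    /* zend_hash_str_add() returns NULL instead of overwriting. */
    ZVAL_LONG(&tmp, 7);
    if (zend_hash_str_add(ht, "answer", sizeof("answer") - 1, &tmp) == NULL) {
        /* The key already existed; the value stored above is untouched. */
    }

    /* Append under the next free integer key. */
    ZVAL_LONG(&tmp, 1);
    zend_hash_next_index_insert(ht, &tmp);
}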
1072 | | ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h) |
1073 | 0 | { |
1074 | 0 | zval dummy; |
1075 | |
1076 | 0 | ZVAL_NULL(&dummy); |
1077 | 0 | return zend_hash_index_add(ht, h, &dummy); |
1078 | 0 | } |
1079 | | |
1080 | | ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key) |
1081 | 91.1k | { |
1082 | 91.1k | zval dummy; |
1083 | | |
1084 | 91.1k | ZVAL_NULL(&dummy); |
1085 | 91.1k | return zend_hash_add(ht, key, &dummy); |
1086 | 91.1k | } |
1087 | | |
1088 | | ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len) |
1089 | 0 | { |
1090 | 0 | zval dummy; |
1091 | |
1092 | 0 | ZVAL_NULL(&dummy); |
1093 | 0 | return zend_hash_str_add(ht, str, len, &dummy); |
1094 | 0 | } |
1095 | | |
1096 | | static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag) |
1097 | 991k | { |
1098 | 991k | uint32_t nIndex; |
1099 | 991k | uint32_t idx; |
1100 | 991k | Bucket *p; |
1101 | 991k | zval *zv; |
1102 | | |
1103 | 991k | IS_CONSISTENT(ht); |
1104 | 991k | HT_ASSERT_RC1(ht); |
1105 | | |
1106 | 991k | if ((flag & HASH_ADD_NEXT) && h == ZEND_LONG_MIN) { |
1107 | 40.6k | h = 0; |
1108 | 40.6k | } |
1109 | | |
1110 | 991k | if (HT_IS_PACKED(ht)) { |
1111 | 10.9k | if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT) |
1112 | 10.9k | && h < ht->nNumUsed) { |
1113 | 1.05k | zv = ht->arPacked + h; |
1114 | 1.05k | if (Z_TYPE_P(zv) != IS_UNDEF) { |
1115 | 28 | if (flag & HASH_LOOKUP) { |
1116 | 28 | return zv; |
1117 | 28 | } |
1118 | 0 | replace: |
1119 | 0 | if (flag & HASH_ADD) { |
1120 | 0 | return NULL; |
1121 | 0 | } |
1122 | 0 | if (ht->pDestructor) { |
1123 | 0 | ht->pDestructor(zv); |
1124 | 0 | } |
1125 | 0 | ZVAL_COPY_VALUE(zv, pData); |
1126 | 0 | return zv; |
1127 | 1.02k | } else { /* we have to keep the order :( */ |
1128 | 1.02k | goto convert_to_hash; |
1129 | 1.02k | } |
1130 | 9.93k | } else if (EXPECTED(h < ht->nTableSize)) { |
1131 | 53.6k | add_to_packed: |
1132 | 53.6k | zv = ht->arPacked + h; |
1133 | | /* incremental initialization of empty Buckets */ |
1134 | 53.6k | if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)) { |
1135 | 13.1k | if (h > ht->nNumUsed) { |
1136 | 5.07k | zval *q = ht->arPacked + ht->nNumUsed; |
1137 | 16.5k | while (q != zv) { |
1138 | 11.4k | ZVAL_UNDEF(q); |
1139 | 11.4k | q++; |
1140 | 11.4k | } |
1141 | 5.07k | } |
1142 | 13.1k | } |
1143 | 53.6k | ht->nNextFreeElement = ht->nNumUsed = h + 1; |
1144 | 53.6k | ht->nNumOfElements++; |
1145 | 53.6k | if (flag & HASH_LOOKUP) { |
1146 | 202 | ZVAL_NULL(zv); |
1147 | 53.4k | } else { |
1148 | 53.4k | ZVAL_COPY_VALUE(zv, pData); |
1149 | 53.4k | } |
1150 | | |
1151 | 53.6k | return zv; |
1152 | 7.84k | } else if ((h >> 1) < ht->nTableSize && |
1153 | 806 | (ht->nTableSize >> 1) < ht->nNumOfElements) { |
1154 | 0 | zend_hash_packed_grow(ht); |
1155 | 0 | goto add_to_packed; |
1156 | 2.08k | } else { |
1157 | 2.08k | if (ht->nNumUsed >= ht->nTableSize) { |
1158 | 39 | ht->nTableSize += ht->nTableSize; |
1159 | 39 | } |
1160 | 3.10k | convert_to_hash: |
1161 | 3.10k | zend_hash_packed_to_hash(ht); |
1162 | 3.10k | } |
1163 | 980k | } else if (HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED) { |
1164 | 51.2k | if (h < ht->nTableSize) { |
1165 | 45.8k | zend_hash_real_init_packed_ex(ht); |
1166 | 45.8k | goto add_to_packed; |
1167 | 45.8k | } |
1168 | 5.48k | zend_hash_real_init_mixed(ht); |
1169 | 929k | } else { |
1170 | 929k | if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) { |
1171 | 929k | p = zend_hash_index_find_bucket(ht, h); |
1172 | 929k | if (p) { |
1173 | 147k | if (flag & HASH_LOOKUP) { |
1174 | 147k | return &p->val; |
1175 | 147k | } |
1176 | 0 | ZEND_ASSERT((flag & HASH_ADD_NEW) == 0); |
1177 | 0 | zv = &p->val; |
1178 | 0 | goto replace; |
1179 | 0 | } |
1180 | 929k | } |
1181 | 782k | ZEND_HASH_IF_FULL_DO_RESIZE(ht); /* If the Hash table is full, resize it */ |
1182 | 782k | } |
1183 | | |
1184 | 790k | idx = ht->nNumUsed++; |
1185 | 790k | nIndex = h | ht->nTableMask; |
1186 | 790k | p = ht->arData + idx; |
1187 | 790k | Z_NEXT(p->val) = HT_HASH(ht, nIndex); |
1188 | 790k | HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx); |
1189 | 790k | if ((zend_long)h >= ht->nNextFreeElement) { |
1190 | 485k | ht->nNextFreeElement = (zend_long)h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX; |
1191 | 485k | } |
1192 | 790k | ht->nNumOfElements++; |
1193 | 790k | p->h = h; |
1194 | 790k | p->key = NULL; |
1195 | 790k | if (flag & HASH_LOOKUP) { |
1196 | 693k | ZVAL_NULL(&p->val); |
1197 | 693k | } else { |
1198 | 97.6k | ZVAL_COPY_VALUE(&p->val, pData); |
1199 | 97.6k | } |
1200 | | |
1201 | 790k | return &p->val; |
1202 | 991k | } |
1203 | | |
1204 | | ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag) |
1205 | 0 | { |
1206 | 0 | if (flag == HASH_ADD) { |
1207 | 0 | return zend_hash_index_add(ht, h, pData); |
1208 | 0 | } else if (flag == (HASH_ADD|HASH_ADD_NEW)) { |
1209 | 0 | return zend_hash_index_add_new(ht, h, pData); |
1210 | 0 | } else if (flag == (HASH_ADD|HASH_ADD_NEXT)) { |
1211 | 0 | ZEND_ASSERT(h == ht->nNextFreeElement); |
1212 | 0 | return zend_hash_next_index_insert(ht, pData); |
1213 | 0 | } else if (flag == (HASH_ADD|HASH_ADD_NEW|HASH_ADD_NEXT)) { |
1214 | 0 | ZEND_ASSERT(h == ht->nNextFreeElement); |
1215 | 0 | return zend_hash_next_index_insert_new(ht, pData); |
1216 | 0 | } else { |
1217 | 0 | ZEND_ASSERT(flag == HASH_UPDATE); |
1218 | 0 | return zend_hash_index_update(ht, h, pData); |
1219 | 0 | } |
1220 | 0 | } |
1221 | | |
1222 | | ZEND_API zval* ZEND_FASTCALL zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData) |
1223 | 0 | { |
1224 | 0 | return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD); |
1225 | 0 | } |
1226 | | |
1227 | | ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData) |
1228 | 0 | { |
1229 | 0 | return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW); |
1230 | 0 | } |
1231 | | |
1232 | | ZEND_API zval* ZEND_FASTCALL zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData) |
1233 | 110k | { |
1234 | 110k | return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE); |
1235 | 110k | } |
1236 | | |
1237 | | ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert(HashTable *ht, zval *pData) |
1238 | 162 | { |
1239 | 162 | return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT); |
1240 | 162 | } |
1241 | | |
1242 | | ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert_new(HashTable *ht, zval *pData) |
1243 | 40.5k | { |
1244 | 40.5k | return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT); |
1245 | 40.5k | } |
1246 | | |
1247 | | ZEND_API zval* ZEND_FASTCALL zend_hash_index_lookup(HashTable *ht, zend_ulong h) |
1248 | 840k | { |
1249 | 840k | return _zend_hash_index_add_or_update_i(ht, h, NULL, HASH_LOOKUP); |
1250 | 840k | } |
1251 | | |
1252 | | ZEND_API zval* ZEND_FASTCALL zend_hash_set_bucket_key(HashTable *ht, Bucket *b, zend_string *key) |
1253 | 0 | { |
1254 | 0 | uint32_t nIndex; |
1255 | 0 | uint32_t idx, i; |
1256 | 0 | Bucket *p, *arData; |
1257 | |
1258 | 0 | IS_CONSISTENT(ht); |
1259 | 0 | HT_ASSERT_RC1(ht); |
1260 | 0 | ZEND_ASSERT(!HT_IS_PACKED(ht)); |
1261 | |
1262 | 0 | (void)zend_string_hash_val(key); |
1263 | 0 | p = zend_hash_find_bucket(ht, key); |
1264 | 0 | if (UNEXPECTED(p)) { |
1265 | 0 | return (p == b) ? &p->val : NULL; |
1266 | 0 | } |
1267 | | |
1268 | 0 | if (!ZSTR_IS_INTERNED(key)) { |
1269 | 0 | zend_string_addref(key); |
1270 | 0 | HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS; |
1271 | 0 | } |
1272 | |
1273 | 0 | arData = ht->arData; |
1274 | | |
1275 | | /* del from hash */ |
1276 | 0 | idx = HT_IDX_TO_HASH(b - arData); |
1277 | 0 | nIndex = b->h | ht->nTableMask; |
1278 | 0 | i = HT_HASH_EX(arData, nIndex); |
1279 | 0 | if (i == idx) { |
1280 | 0 | HT_HASH_EX(arData, nIndex) = Z_NEXT(b->val); |
1281 | 0 | } else { |
1282 | 0 | p = HT_HASH_TO_BUCKET_EX(arData, i); |
1283 | 0 | while (Z_NEXT(p->val) != idx) { |
1284 | 0 | i = Z_NEXT(p->val); |
1285 | 0 | p = HT_HASH_TO_BUCKET_EX(arData, i); |
1286 | 0 | } |
1287 | 0 | Z_NEXT(p->val) = Z_NEXT(b->val); |
1288 | 0 | } |
1289 | 0 | zend_string_release(b->key); |
1290 | | |
1291 | | /* add to hash */ |
1292 | 0 | idx = b - arData; |
1293 | 0 | b->key = key; |
1294 | 0 | b->h = ZSTR_H(key); |
1295 | 0 | nIndex = b->h | ht->nTableMask; |
1296 | 0 | idx = HT_IDX_TO_HASH(idx); |
1297 | 0 | i = HT_HASH_EX(arData, nIndex); |
1298 | 0 | if (i == HT_INVALID_IDX || i < idx) { |
1299 | 0 | Z_NEXT(b->val) = i; |
1300 | 0 | HT_HASH_EX(arData, nIndex) = idx; |
1301 | 0 | } else { |
1302 | 0 | p = HT_HASH_TO_BUCKET_EX(arData, i); |
1303 | 0 | while (Z_NEXT(p->val) != HT_INVALID_IDX && Z_NEXT(p->val) > idx) { |
1304 | 0 | i = Z_NEXT(p->val); |
1305 | 0 | p = HT_HASH_TO_BUCKET_EX(arData, i); |
1306 | 0 | } |
1307 | 0 | Z_NEXT(b->val) = Z_NEXT(p->val); |
1308 | 0 | Z_NEXT(p->val) = idx; |
1309 | 0 | } |
1310 | 0 | return &b->val; |
1311 | 0 | } |
1312 | | |
1313 | | static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht) |
1314 | 1.10k | { |
1315 | | |
1316 | 1.10k | IS_CONSISTENT(ht); |
1317 | 1.10k | HT_ASSERT_RC1(ht); |
1318 | | |
1319 | 1.10k | ZEND_ASSERT(!HT_IS_PACKED(ht)); |
1320 | 1.10k | if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */ |
1321 | 585 | zend_hash_rehash(ht); |
1322 | 585 | } else if (ht->nTableSize < HT_MAX_SIZE) { /* Let's double the table size */ |
1323 | 516 | void *new_data, *old_data = HT_GET_DATA_ADDR(ht); |
1324 | 516 | uint32_t nSize = ht->nTableSize + ht->nTableSize; |
1325 | 516 | Bucket *old_buckets = ht->arData; |
1326 | | |
1327 | 516 | ZEND_ASSERT(HT_SIZE_TO_MASK(nSize)); |
1328 | | |
1329 | 516 | new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT); |
1330 | 516 | ht->nTableSize = nSize; |
1331 | 516 | ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize); |
1332 | 516 | HT_SET_DATA_ADDR(ht, new_data); |
1333 | 516 | memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed); |
1334 | 516 | pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT); |
1335 | 516 | zend_hash_rehash(ht); |
1336 | 516 | } else { |
1337 | 0 | zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket)); |
1338 | 0 | } |
1339 | 1.10k | } |
1340 | | |
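The resize policy above boils down to one comparison: if the number of trailing used slots (live entries plus holes left by deletions) exceeds the live element count by more than roughly 1/32, the table is compacted in place by zend_hash_rehash(); otherwise it is doubled and rehashed. A minimal sketch of that decision, with an invented name:

#include <stdbool.h>
#include <stdint.h>

/* num_used mirrors nNumUsed (every slot ever written, including holes);
 * num_elements mirrors nNumOfElements (live entries only). */
static bool sketch_should_compact(uint32_t num_used, uint32_t num_elements)
{
    return num_used > num_elements + (num_elements >> 5);
}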
1341 | | ZEND_API void ZEND_FASTCALL zend_hash_rehash(HashTable *ht) |
1342 | 108k | { |
1343 | 108k | Bucket *p; |
1344 | 108k | uint32_t nIndex, i; |
1345 | | |
1346 | 108k | IS_CONSISTENT(ht); |
1347 | | |
1348 | 108k | if (UNEXPECTED(ht->nNumOfElements == 0)) { |
1349 | 0 | if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) { |
1350 | 0 | ht->nNumUsed = 0; |
1351 | 0 | HT_HASH_RESET(ht); |
1352 | | /* Even if the array is empty, we still need to reset the iterator positions. */ |
1353 | 0 | ht->nInternalPointer = 0; |
1354 | 0 | if (UNEXPECTED(HT_HAS_ITERATORS(ht))) { |
1355 | 0 | HashTableIterator *iter = EG(ht_iterators); |
1356 | 0 | HashTableIterator *end = iter + EG(ht_iterators_used); |
1357 | 0 | while (iter != end) { |
1358 | 0 | if (iter->ht == ht) { |
1359 | 0 | iter->pos = 0; |
1360 | 0 | } |
1361 | 0 | iter++; |
1362 | 0 | } |
1363 | 0 | } |
1364 | 0 | } |
1365 | 0 | return; |
1366 | 0 | } |
1367 | | |
1368 | 108k | HT_HASH_RESET(ht); |
1369 | 108k | i = 0; |
1370 | 108k | p = ht->arData; |
1371 | 108k | if (HT_IS_WITHOUT_HOLES(ht)) { |
1372 | 518k | do { |
1373 | 518k | nIndex = p->h | ht->nTableMask; |
1374 | 518k | Z_NEXT(p->val) = HT_HASH(ht, nIndex); |
1375 | 518k | HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i); |
1376 | 518k | p++; |
1377 | 518k | } while (++i < ht->nNumUsed); |
1378 | 106k | } else { |
1379 | 1.87k | uint32_t old_num_used = ht->nNumUsed; |
1380 | 142k | do { |
1381 | 142k | if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) { |
1382 | 1.87k | uint32_t j = i; |
1383 | 1.87k | Bucket *q = p; |
1384 | | |
1385 | 1.87k | if (EXPECTED(!HT_HAS_ITERATORS(ht))) { |
1386 | 69.5k | while (++i < ht->nNumUsed) { |
1387 | 67.7k | p++; |
1388 | 67.7k | if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) { |
1389 | 49.1k | ZVAL_COPY_VALUE(&q->val, &p->val); |
1390 | 49.1k | q->h = p->h; |
1391 | 49.1k | nIndex = q->h | ht->nTableMask; |
1392 | 49.1k | q->key = p->key; |
1393 | 49.1k | Z_NEXT(q->val) = HT_HASH(ht, nIndex); |
1394 | 49.1k | HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j); |
1395 | 49.1k | if (UNEXPECTED(ht->nInternalPointer > j && ht->nInternalPointer <= i)) { |
1396 | 0 | ht->nInternalPointer = j; |
1397 | 0 | } |
1398 | 49.1k | q++; |
1399 | 49.1k | j++; |
1400 | 49.1k | } |
1401 | 67.7k | } |
1402 | 1.87k | } else { |
1403 | 0 | uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, i + 1); |
1404 | |
1405 | 0 | while (++i < ht->nNumUsed) { |
1406 | 0 | p++; |
1407 | 0 | if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) { |
1408 | 0 | ZVAL_COPY_VALUE(&q->val, &p->val); |
1409 | 0 | q->h = p->h; |
1410 | 0 | nIndex = q->h | ht->nTableMask; |
1411 | 0 | q->key = p->key; |
1412 | 0 | Z_NEXT(q->val) = HT_HASH(ht, nIndex); |
1413 | 0 | HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j); |
1414 | 0 | if (UNEXPECTED(ht->nInternalPointer > j && ht->nInternalPointer <= i)) { |
1415 | 0 | ht->nInternalPointer = j; |
1416 | 0 | } |
1417 | 0 | if (UNEXPECTED(i >= iter_pos)) { |
1418 | 0 | do { |
1419 | 0 | zend_hash_iterators_update(ht, iter_pos, j); |
1420 | 0 | iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1); |
1421 | 0 | } while (iter_pos < i); |
1422 | 0 | } |
1423 | 0 | q++; |
1424 | 0 | j++; |
1425 | 0 | } |
1426 | 0 | } |
1427 | 0 | } |
1428 | 1.87k | ht->nNumUsed = j; |
1429 | 1.87k | break; |
1430 | 1.87k | } |
1431 | 140k | nIndex = p->h | ht->nTableMask; |
1432 | 140k | Z_NEXT(p->val) = HT_HASH(ht, nIndex); |
1433 | 140k | HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i); |
1434 | 140k | p++; |
1435 | 140k | } while (++i < ht->nNumUsed); |
1436 | | |
1437 | | /* Migrate iterator positions that pointed one past the old end of the array to the |
1438 | | * new one-past-the-end position, so that newly inserted elements are picked up correctly. */ |
1439 | 1.87k | if (UNEXPECTED(HT_HAS_ITERATORS(ht))) { |
1440 | 0 | _zend_hash_iterators_update(ht, old_num_used, ht->nNumUsed); |
1441 | 0 | } |
1442 | 1.87k | } |
1443 | 108k | } |
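
The p->h | ht->nTableMask computation used throughout the rehash loop above works because nTableMask stores the negated (power-of-two) count of hash slots, so the OR maps any hash into a small negative offset that indexes the slot array laid out immediately before the bucket data. A self-contained sketch of just that arithmetic, with an arbitrary hash value (plain C, not part of zend_hash.c):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint32_t nHashSlots = 8;                          /* always a power of two */
        uint32_t nTableMask = (uint32_t)0 - nHashSlots;   /* 0xFFFFFFF8: negated slot count */
        uint64_t h = 0x9E3779B1u;                         /* arbitrary key hash */
        uint32_t nIndex = (uint32_t)h | nTableMask;       /* always in 0xFFFFFFF8..0xFFFFFFFF */
        /* Read as a signed offset, nIndex selects one of the nHashSlots uint32_t
         * slots stored just before the bucket array; HT_HASH(ht, nIndex) is the
         * slot that the loop above resets and re-links. */
        printf("slot offset = %d\n", (int32_t)nIndex);    /* prints -7 for this hash */
        return 0;
    }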
1444 | | |
1445 | | static zend_always_inline void zend_hash_iterators_clamp_max(const HashTable *ht, uint32_t max) |
1446 | 293k | { |
1447 | 293k | if (UNEXPECTED(HT_HAS_ITERATORS(ht))) { |
1448 | 0 | HashTableIterator *iter = EG(ht_iterators); |
1449 | 0 | const HashTableIterator *end = iter + EG(ht_iterators_used); |
1450 | 0 | while (iter != end) { |
1451 | 0 | if (iter->ht == ht) { |
1452 | 0 | iter->pos = MIN(iter->pos, max); |
1453 | 0 | } |
1454 | 0 | iter++; |
1455 | 0 | } |
1456 | 0 | } |
1457 | 293k | } |
1458 | | |
1459 | | static zend_always_inline void _zend_hash_packed_del_val(HashTable *ht, uint32_t idx, zval *zv) |
1460 | 0 | { |
1461 | 0 | idx = HT_HASH_TO_IDX(idx); |
1462 | 0 | ht->nNumOfElements--; |
1463 | 0 | if (ht->nNumUsed - 1 == idx) { |
1464 | 0 | do { |
1465 | 0 | ht->nNumUsed--; |
1466 | 0 | } while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arPacked[ht->nNumUsed-1]) == IS_UNDEF))); |
1467 | 0 | ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed); |
1468 | 0 | zend_hash_iterators_clamp_max(ht, ht->nNumUsed); |
1469 | 0 | } |
1470 | 0 | if (ht->pDestructor) { |
1471 | 0 | zval tmp; |
1472 | 0 | ZVAL_COPY_VALUE(&tmp, zv); |
1473 | 0 | ZVAL_UNDEF(zv); |
1474 | 0 | ht->pDestructor(&tmp); |
1475 | 0 | } else { |
1476 | 0 | ZVAL_UNDEF(zv); |
1477 | 0 | } |
1478 | 0 | } |
1479 | | |
1480 | | static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev) |
1481 | 312k | { |
1482 | 312k | if (prev) { |
1483 | 276 | Z_NEXT(prev->val) = Z_NEXT(p->val); |
1484 | 311k | } else { |
1485 | 311k | HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val); |
1486 | 311k | } |
1487 | 312k | idx = HT_HASH_TO_IDX(idx); |
1488 | 312k | ht->nNumOfElements--; |
1489 | 312k | if (ht->nNumUsed - 1 == idx) { |
1490 | 294k | do { |
1491 | 294k | ht->nNumUsed--; |
1492 | 294k | } while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF))); |
1493 | 293k | ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed); |
1494 | 293k | zend_hash_iterators_clamp_max(ht, ht->nNumUsed); |
1495 | 293k | } |
1496 | 312k | if (ht->pDestructor) { |
1497 | 192k | zval tmp; |
1498 | 192k | ZVAL_COPY_VALUE(&tmp, &p->val); |
1499 | 192k | ZVAL_UNDEF(&p->val); |
1500 | 192k | ht->pDestructor(&tmp); |
1501 | 192k | } else { |
1502 | 119k | ZVAL_UNDEF(&p->val); |
1503 | 119k | } |
1504 | 312k | } |
1505 | | |
1506 | | static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p) |
1507 | 192k | { |
1508 | 192k | Bucket *prev = NULL; |
1509 | 192k | uint32_t nIndex; |
1510 | 192k | uint32_t i; |
1511 | | |
1512 | 192k | nIndex = p->h | ht->nTableMask; |
1513 | 192k | i = HT_HASH(ht, nIndex); |
1514 | | |
1515 | 192k | if (i != idx) { |
1516 | 0 | prev = HT_HASH_TO_BUCKET(ht, i); |
1517 | 0 | while (Z_NEXT(prev->val) != idx) { |
1518 | 0 | i = Z_NEXT(prev->val); |
1519 | 0 | prev = HT_HASH_TO_BUCKET(ht, i); |
1520 | 0 | } |
1521 | 0 | } |
1522 | | |
1523 | 192k | if (p->key) { |
1524 | 192k | zend_string_release(p->key); |
1525 | 192k | p->key = NULL; |
1526 | 192k | } |
1527 | 192k | _zend_hash_del_el_ex(ht, idx, p, prev); |
1528 | 192k | } |
1529 | | |
1530 | | ZEND_API void ZEND_FASTCALL zend_hash_packed_del_val(HashTable *ht, zval *zv) |
1531 | 0 | { |
1532 | 0 | IS_CONSISTENT(ht); |
1533 | 0 | HT_ASSERT_RC1(ht); |
1534 | 0 | ZEND_ASSERT(HT_IS_PACKED(ht)); |
1535 | 0 | _zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(zv - ht->arPacked), zv); |
1536 | 0 | } |
1537 | | |
1538 | | |
1539 | | ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p) |
1540 | 0 | { |
1541 | 0 | IS_CONSISTENT(ht); |
1542 | 0 | HT_ASSERT_RC1(ht); |
1543 | 0 | ZEND_ASSERT(!HT_IS_PACKED(ht)); |
1544 | 0 | _zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p); |
1545 | 0 | } |
1546 | | |
1547 | | ZEND_API zend_result ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key) |
1548 | 91.1k | { |
1549 | 91.1k | zend_ulong h; |
1550 | 91.1k | uint32_t nIndex; |
1551 | 91.1k | uint32_t idx; |
1552 | 91.1k | Bucket *p; |
1553 | 91.1k | Bucket *prev = NULL; |
1554 | | |
1555 | 91.1k | IS_CONSISTENT(ht); |
1556 | 91.1k | HT_ASSERT_RC1(ht); |
1557 | | |
1558 | 91.1k | h = zend_string_hash_val(key); |
1559 | 91.1k | nIndex = h | ht->nTableMask; |
1560 | | |
1561 | 91.1k | idx = HT_HASH(ht, nIndex); |
1562 | 91.1k | while (idx != HT_INVALID_IDX) { |
1563 | 91.1k | p = HT_HASH_TO_BUCKET(ht, idx); |
1564 | 91.1k | if ((p->key == key) || |
1565 | 0 | (p->h == h && |
1566 | 0 | p->key && |
1567 | 91.1k | zend_string_equal_content(p->key, key))) { |
1568 | 91.1k | zend_string_release(p->key); |
1569 | 91.1k | p->key = NULL; |
1570 | 91.1k | _zend_hash_del_el_ex(ht, idx, p, prev); |
1571 | 91.1k | return SUCCESS; |
1572 | 91.1k | } |
1573 | 0 | prev = p; |
1574 | 0 | idx = Z_NEXT(p->val); |
1575 | 0 | } |
1576 | 0 | return FAILURE; |
1577 | 91.1k | } |
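
A minimal caller-side sketch of zend_hash_del() above (and of zend_hash_str_del(), defined further below); illustrative only, not part of this file. It assumes ht is a table the caller is allowed to modify:

    zend_string *key = zend_string_init("color", sizeof("color") - 1, /* persistent */ 0);
    if (zend_hash_del(ht, key) == FAILURE) {
        /* no entry with that key (matched by pointer or by equal content) existed */
    }
    zend_string_release(key);

    /* the same deletion without materializing a zend_string */
    zend_hash_str_del(ht, "color", sizeof("color") - 1);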
1578 | | |
1579 | | ZEND_API zend_result ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key) |
1580 | 0 | { |
1581 | 0 | zend_ulong h; |
1582 | 0 | uint32_t nIndex; |
1583 | 0 | uint32_t idx; |
1584 | 0 | Bucket *p; |
1585 | 0 | Bucket *prev = NULL; |
1586 | |
|
1587 | 0 | IS_CONSISTENT(ht); |
1588 | 0 | HT_ASSERT_RC1(ht); |
1589 | |
|
1590 | 0 | h = zend_string_hash_val(key); |
1591 | 0 | nIndex = h | ht->nTableMask; |
1592 | |
|
1593 | 0 | idx = HT_HASH(ht, nIndex); |
1594 | 0 | while (idx != HT_INVALID_IDX) { |
1595 | 0 | p = HT_HASH_TO_BUCKET(ht, idx); |
1596 | 0 | if ((p->key == key) || |
1597 | 0 | (p->h == h && |
1598 | 0 | p->key && |
1599 | 0 | zend_string_equal_content(p->key, key))) { |
1600 | 0 | if (Z_TYPE(p->val) == IS_INDIRECT) { |
1601 | 0 | zval *data = Z_INDIRECT(p->val); |
1602 | |
|
1603 | 0 | if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) { |
1604 | 0 | return FAILURE; |
1605 | 0 | } else { |
1606 | 0 | if (ht->pDestructor) { |
1607 | 0 | zval tmp; |
1608 | 0 | ZVAL_COPY_VALUE(&tmp, data); |
1609 | 0 | ZVAL_UNDEF(data); |
1610 | 0 | ht->pDestructor(&tmp); |
1611 | 0 | } else { |
1612 | 0 | ZVAL_UNDEF(data); |
1613 | 0 | } |
1614 | 0 | HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND; |
1615 | 0 | } |
1616 | 0 | } else { |
1617 | 0 | zend_string_release(p->key); |
1618 | 0 | p->key = NULL; |
1619 | 0 | _zend_hash_del_el_ex(ht, idx, p, prev); |
1620 | 0 | } |
1621 | 0 | return SUCCESS; |
1622 | 0 | } |
1623 | 0 | prev = p; |
1624 | 0 | idx = Z_NEXT(p->val); |
1625 | 0 | } |
1626 | 0 | return FAILURE; |
1627 | 0 | } |
1628 | | |
1629 | | ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len) |
1630 | 0 | { |
1631 | 0 | zend_ulong h; |
1632 | 0 | uint32_t nIndex; |
1633 | 0 | uint32_t idx; |
1634 | 0 | Bucket *p; |
1635 | 0 | Bucket *prev = NULL; |
1636 | |
|
1637 | 0 | IS_CONSISTENT(ht); |
1638 | 0 | HT_ASSERT_RC1(ht); |
1639 | |
|
1640 | 0 | h = zend_inline_hash_func(str, len); |
1641 | 0 | nIndex = h | ht->nTableMask; |
1642 | |
|
1643 | 0 | idx = HT_HASH(ht, nIndex); |
1644 | 0 | while (idx != HT_INVALID_IDX) { |
1645 | 0 | p = HT_HASH_TO_BUCKET(ht, idx); |
1646 | 0 | if ((p->h == h) |
1647 | 0 | && p->key |
1648 | 0 | && zend_string_equals_cstr(p->key, str, len)) { |
1649 | 0 | if (Z_TYPE(p->val) == IS_INDIRECT) { |
1650 | 0 | zval *data = Z_INDIRECT(p->val); |
1651 | |
|
1652 | 0 | if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) { |
1653 | 0 | return FAILURE; |
1654 | 0 | } else { |
1655 | 0 | if (ht->pDestructor) { |
1656 | 0 | ht->pDestructor(data); |
1657 | 0 | } |
1658 | 0 | ZVAL_UNDEF(data); |
1659 | 0 | HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND; |
1660 | 0 | } |
1661 | 0 | } else { |
1662 | 0 | zend_string_release(p->key); |
1663 | 0 | p->key = NULL; |
1664 | 0 | _zend_hash_del_el_ex(ht, idx, p, prev); |
1665 | 0 | } |
1666 | 0 | return SUCCESS; |
1667 | 0 | } |
1668 | 0 | prev = p; |
1669 | 0 | idx = Z_NEXT(p->val); |
1670 | 0 | } |
1671 | 0 | return FAILURE; |
1672 | 0 | } |
1673 | | |
1674 | | ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len) |
1675 | 72 | { |
1676 | 72 | zend_ulong h; |
1677 | 72 | uint32_t nIndex; |
1678 | 72 | uint32_t idx; |
1679 | 72 | Bucket *p; |
1680 | 72 | Bucket *prev = NULL; |
1681 | | |
1682 | 72 | IS_CONSISTENT(ht); |
1683 | 72 | HT_ASSERT_RC1(ht); |
1684 | | |
1685 | 72 | h = zend_inline_hash_func(str, len); |
1686 | 72 | nIndex = h | ht->nTableMask; |
1687 | | |
1688 | 72 | idx = HT_HASH(ht, nIndex); |
1689 | 90 | while (idx != HT_INVALID_IDX) { |
1690 | 82 | p = HT_HASH_TO_BUCKET(ht, idx); |
1691 | 82 | if ((p->h == h) |
1692 | 64 | && p->key |
1693 | 64 | && zend_string_equals_cstr(p->key, str, len)) { |
1694 | 64 | zend_string_release(p->key); |
1695 | 64 | p->key = NULL; |
1696 | 64 | _zend_hash_del_el_ex(ht, idx, p, prev); |
1697 | 64 | return SUCCESS; |
1698 | 64 | } |
1699 | 18 | prev = p; |
1700 | 18 | idx = Z_NEXT(p->val); |
1701 | 18 | } |
1702 | 8 | return FAILURE; |
1703 | 72 | } |
1704 | | |
1705 | | ZEND_API zend_result ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h) |
1706 | 122k | { |
1707 | 122k | uint32_t nIndex; |
1708 | 122k | uint32_t idx; |
1709 | 122k | Bucket *p; |
1710 | 122k | Bucket *prev = NULL; |
1711 | | |
1712 | 122k | IS_CONSISTENT(ht); |
1713 | 122k | HT_ASSERT_RC1(ht); |
1714 | | |
1715 | 122k | if (HT_IS_PACKED(ht)) { |
1716 | 0 | if (h < ht->nNumUsed) { |
1717 | 0 | zval *zv = ht->arPacked + h; |
1718 | 0 | if (Z_TYPE_P(zv) != IS_UNDEF) { |
1719 | 0 | _zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(h), zv); |
1720 | 0 | return SUCCESS; |
1721 | 0 | } |
1722 | 0 | } |
1723 | 0 | return FAILURE; |
1724 | 0 | } |
1725 | 122k | nIndex = h | ht->nTableMask; |
1726 | | |
1727 | 122k | idx = HT_HASH(ht, nIndex); |
1728 | 617k | while (idx != HT_INVALID_IDX) { |
1729 | 523k | p = HT_HASH_TO_BUCKET(ht, idx); |
1730 | 523k | if ((p->h == h) && (p->key == NULL)) { |
1731 | 28.5k | _zend_hash_del_el_ex(ht, idx, p, prev); |
1732 | 28.5k | return SUCCESS; |
1733 | 28.5k | } |
1734 | 495k | prev = p; |
1735 | 495k | idx = Z_NEXT(p->val); |
1736 | 495k | } |
1737 | 93.8k | return FAILURE; |
1738 | 122k | } |
1739 | | |
1740 | | ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht) |
1741 | 320k | { |
1742 | 320k | IS_CONSISTENT(ht); |
1743 | 320k | HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1); |
1744 | | |
1745 | 320k | if (ht->nNumUsed) { |
1746 | 6.57k | if (HT_IS_PACKED(ht)) { |
1747 | 110 | if (ht->pDestructor) { |
1748 | 110 | zval *zv = ht->arPacked; |
1749 | 110 | zval *end = zv + ht->nNumUsed; |
1750 | | |
1751 | 110 | SET_INCONSISTENT(HT_IS_DESTROYING); |
1752 | 110 | if (HT_IS_WITHOUT_HOLES(ht)) { |
1753 | 2 | do { |
1754 | 2 | ht->pDestructor(zv); |
1755 | 2 | } while (++zv != end); |
1756 | 108 | } else { |
1757 | 649 | do { |
1758 | 649 | if (EXPECTED(Z_TYPE_P(zv) != IS_UNDEF)) { |
1759 | 198 | ht->pDestructor(zv); |
1760 | 198 | } |
1761 | 649 | } while (++zv != end); |
1762 | 108 | } |
1763 | 110 | SET_INCONSISTENT(HT_DESTROYED); |
1764 | 110 | } |
1765 | 110 | zend_hash_iterators_remove(ht); |
1766 | 6.46k | } else { |
1767 | 6.46k | Bucket *p = ht->arData; |
1768 | 6.46k | Bucket *end = p + ht->nNumUsed; |
1769 | | |
1770 | 6.46k | if (ht->pDestructor) { |
1771 | 6.26k | SET_INCONSISTENT(HT_IS_DESTROYING); |
1772 | | |
1773 | 6.26k | if (HT_HAS_STATIC_KEYS_ONLY(ht)) { |
1774 | 5.90k | if (HT_IS_WITHOUT_HOLES(ht)) { |
1775 | 12.8k | do { |
1776 | 12.8k | ht->pDestructor(&p->val); |
1777 | 12.8k | } while (++p != end); |
1778 | 5.90k | } else { |
1779 | 0 | do { |
1780 | 0 | if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) { |
1781 | 0 | ht->pDestructor(&p->val); |
1782 | 0 | } |
1783 | 0 | } while (++p != end); |
1784 | 0 | } |
1785 | 5.90k | } else if (HT_IS_WITHOUT_HOLES(ht)) { |
1786 | 611 | do { |
1787 | 611 | ht->pDestructor(&p->val); |
1788 | 611 | if (EXPECTED(p->key)) { |
1789 | 410 | zend_string_release(p->key); |
1790 | 410 | } |
1791 | 611 | } while (++p != end); |
1792 | 359 | } else { |
1793 | 0 | do { |
1794 | 0 | if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) { |
1795 | 0 | ht->pDestructor(&p->val); |
1796 | 0 | if (EXPECTED(p->key)) { |
1797 | 0 | zend_string_release(p->key); |
1798 | 0 | } |
1799 | 0 | } |
1800 | 0 | } while (++p != end); |
1801 | 0 | } |
1802 | | |
1803 | 6.26k | SET_INCONSISTENT(HT_DESTROYED); |
1804 | 6.26k | } else { |
1805 | 199 | if (!HT_HAS_STATIC_KEYS_ONLY(ht)) { |
1806 | 0 | do { |
1807 | 0 | if (EXPECTED(p->key)) { |
1808 | 0 | zend_string_release(p->key); |
1809 | 0 | } |
1810 | 0 | } while (++p != end); |
1811 | 0 | } |
1812 | 199 | } |
1813 | 6.46k | zend_hash_iterators_remove(ht); |
1814 | 6.46k | } |
1815 | 314k | } else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) { |
1816 | 312k | return; |
1817 | 312k | } |
1818 | 8.84k | pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT); |
1819 | 8.84k | } |
1820 | | |
1821 | | ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht) |
1822 | 1.41M | { |
1823 | 1.41M | IS_CONSISTENT(ht); |
1824 | 1.41M | HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1); |
1825 | | |
1826 | | /* break possible cycles */ |
1827 | 1.41M | GC_REMOVE_FROM_BUFFER(ht); |
1828 | 1.41M | GC_TYPE_INFO(ht) = GC_NULL /*???| (GC_WHITE << 16)*/; |
1829 | | |
1830 | 1.41M | if (ht->nNumUsed) { |
1831 | | /* In some rare cases destructors of regular arrays may be changed */ |
1832 | 725k | if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) { |
1833 | 0 | zend_hash_destroy(ht); |
1834 | 0 | goto free_ht; |
1835 | 0 | } |
1836 | | |
1837 | 725k | SET_INCONSISTENT(HT_IS_DESTROYING); |
1838 | | |
1839 | 725k | if (HT_IS_PACKED(ht)) { |
1840 | 82.9k | zval *zv = ht->arPacked; |
1841 | 82.9k | zval *end = zv + ht->nNumUsed; |
1842 | | |
1843 | 94.3k | do { |
1844 | 94.3k | i_zval_ptr_dtor(zv); |
1845 | 94.3k | } while (++zv != end); |
1846 | 642k | } else { |
1847 | 642k | Bucket *p = ht->arData; |
1848 | 642k | Bucket *end = p + ht->nNumUsed; |
1849 | | |
1850 | 642k | if (HT_HAS_STATIC_KEYS_ONLY(ht)) { |
1851 | 1.02M | do { |
1852 | 1.02M | i_zval_ptr_dtor(&p->val); |
1853 | 1.02M | } while (++p != end); |
1854 | 356k | } else if (HT_IS_WITHOUT_HOLES(ht)) { |
1855 | 855k | do { |
1856 | 855k | i_zval_ptr_dtor(&p->val); |
1857 | 855k | if (EXPECTED(p->key)) { |
1858 | 826k | zend_string_release_ex(p->key, 0); |
1859 | 826k | } |
1860 | 855k | } while (++p != end); |
1861 | 356k | } else { |
1862 | 0 | do { |
1863 | 0 | if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) { |
1864 | 0 | i_zval_ptr_dtor(&p->val); |
1865 | 0 | if (EXPECTED(p->key)) { |
1866 | 0 | zend_string_release_ex(p->key, 0); |
1867 | 0 | } |
1868 | 0 | } |
1869 | 0 | } while (++p != end); |
1870 | 0 | } |
1871 | 642k | } |
1872 | 725k | } else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) { |
1873 | 683k | goto free_ht; |
1874 | 683k | } |
1875 | 730k | SET_INCONSISTENT(HT_DESTROYED); |
1876 | 730k | efree(HT_GET_DATA_ADDR(ht)); |
1877 | 1.41M | free_ht: |
1878 | 1.41M | zend_hash_iterators_remove(ht); |
1879 | 1.41M | FREE_HASHTABLE(ht); |
1880 | 1.41M | } |
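
A sketch contrasting the two teardown paths above (illustrative, not part of this file): zend_hash_destroy() runs the destructors and frees only the bucket storage, while zend_array_destroy() additionally handles the GC bookkeeping and frees the HashTable allocation itself. Both expect a refcount of at most 1.

    /* embedded table, e.g. a member of a larger struct */
    HashTable ht;
    zend_hash_init(&ht, 8, NULL, ZVAL_PTR_DTOR, /* persistent */ 0);
    /* ... insert elements ... */
    zend_hash_destroy(&ht);     /* runs destructors and frees arData; ht itself is not freed */

    /* heap-allocated zend_array */
    HashTable *arr = _zend_new_array(8);
    /* ... insert elements ... */
    zend_array_destroy(arr);    /* destructs elements and frees the HashTable as well */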
1881 | | |
1882 | | ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht) |
1883 | 48.1k | { |
1884 | 48.1k | IS_CONSISTENT(ht); |
1885 | 48.1k | HT_ASSERT_RC1(ht); |
1886 | | |
1887 | 48.1k | if (ht->nNumUsed) { |
1888 | 0 | if (HT_IS_PACKED(ht)) { |
1889 | 0 | zval *zv = ht->arPacked; |
1890 | 0 | zval *end = zv + ht->nNumUsed; |
1891 | |
|
1892 | 0 | if (ht->pDestructor) { |
1893 | 0 | if (HT_HAS_STATIC_KEYS_ONLY(ht)) { |
1894 | 0 | if (HT_IS_WITHOUT_HOLES(ht)) { |
1895 | 0 | do { |
1896 | 0 | ht->pDestructor(zv); |
1897 | 0 | } while (++zv != end); |
1898 | 0 | } else { |
1899 | 0 | do { |
1900 | 0 | if (EXPECTED(Z_TYPE_P(zv) != IS_UNDEF)) { |
1901 | 0 | ht->pDestructor(zv); |
1902 | 0 | } |
1903 | 0 | } while (++zv != end); |
1904 | 0 | } |
1905 | 0 | } |
1906 | 0 | } |
1907 | 0 | } else { |
1908 | 0 | Bucket *p = ht->arData; |
1909 | 0 | Bucket *end = p + ht->nNumUsed; |
1910 | |
|
1911 | 0 | if (ht->pDestructor) { |
1912 | 0 | if (HT_HAS_STATIC_KEYS_ONLY(ht)) { |
1913 | 0 | if (HT_IS_WITHOUT_HOLES(ht)) { |
1914 | 0 | do { |
1915 | 0 | ht->pDestructor(&p->val); |
1916 | 0 | } while (++p != end); |
1917 | 0 | } else { |
1918 | 0 | do { |
1919 | 0 | if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) { |
1920 | 0 | ht->pDestructor(&p->val); |
1921 | 0 | } |
1922 | 0 | } while (++p != end); |
1923 | 0 | } |
1924 | 0 | } else if (HT_IS_WITHOUT_HOLES(ht)) { |
1925 | 0 | do { |
1926 | 0 | ht->pDestructor(&p->val); |
1927 | 0 | if (EXPECTED(p->key)) { |
1928 | 0 | zend_string_release(p->key); |
1929 | 0 | } |
1930 | 0 | } while (++p != end); |
1931 | 0 | } else { |
1932 | 0 | do { |
1933 | 0 | if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) { |
1934 | 0 | ht->pDestructor(&p->val); |
1935 | 0 | if (EXPECTED(p->key)) { |
1936 | 0 | zend_string_release(p->key); |
1937 | 0 | } |
1938 | 0 | } |
1939 | 0 | } while (++p != end); |
1940 | 0 | } |
1941 | 0 | } else { |
1942 | 0 | if (!HT_HAS_STATIC_KEYS_ONLY(ht)) { |
1943 | 0 | do { |
1944 | 0 | if (EXPECTED(p->key)) { |
1945 | 0 | zend_string_release(p->key); |
1946 | 0 | } |
1947 | 0 | } while (++p != end); |
1948 | 0 | } |
1949 | 0 | } |
1950 | 0 | HT_HASH_RESET(ht); |
1951 | 0 | } |
1952 | 0 | } |
1953 | 48.1k | ht->nNumUsed = 0; |
1954 | 48.1k | ht->nNumOfElements = 0; |
1955 | 48.1k | ht->nNextFreeElement = ZEND_LONG_MIN; |
1956 | 48.1k | ht->nInternalPointer = 0; |
1957 | 48.1k | } |
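
Unlike the destroy paths above, zend_hash_clean() empties the table but keeps its allocated bucket storage, so it is the cheaper choice when the same table is about to be refilled. A short sketch (illustrative; ht is assumed to be modifiable):

    zend_hash_clean(ht);                            /* all elements destructed, storage kept */
    ZEND_ASSERT(zend_hash_num_elements(ht) == 0);   /* empty, but ready for reuse */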
1958 | | |
1959 | | ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht) |
1960 | 0 | { |
1961 | 0 | Bucket *p, *end; |
1962 | |
|
1963 | 0 | IS_CONSISTENT(ht); |
1964 | 0 | HT_ASSERT_RC1(ht); |
1965 | |
|
1966 | 0 | if (ht->nNumUsed) { |
1967 | 0 | ZEND_ASSERT(!HT_IS_PACKED(ht)); |
1968 | 0 | p = ht->arData; |
1969 | 0 | end = p + ht->nNumUsed; |
1970 | 0 | if (HT_HAS_STATIC_KEYS_ONLY(ht)) { |
1971 | 0 | do { |
1972 | 0 | i_zval_ptr_dtor(&p->val); |
1973 | 0 | } while (++p != end); |
1974 | 0 | } else if (HT_IS_WITHOUT_HOLES(ht)) { |
1975 | 0 | do { |
1976 | 0 | i_zval_ptr_dtor(&p->val); |
1977 | 0 | if (EXPECTED(p->key)) { |
1978 | 0 | zend_string_release(p->key); |
1979 | 0 | } |
1980 | 0 | } while (++p != end); |
1981 | 0 | } else { |
1982 | 0 | do { |
1983 | 0 | if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) { |
1984 | 0 | i_zval_ptr_dtor(&p->val); |
1985 | 0 | if (EXPECTED(p->key)) { |
1986 | 0 | zend_string_release(p->key); |
1987 | 0 | } |
1988 | 0 | } |
1989 | 0 | } while (++p != end); |
1990 | 0 | } |
1991 | 0 | HT_HASH_RESET(ht); |
1992 | 0 | } |
1993 | 0 | ht->nNumUsed = 0; |
1994 | 0 | ht->nNumOfElements = 0; |
1995 | 0 | ht->nNextFreeElement = ZEND_LONG_MIN; |
1996 | 0 | ht->nInternalPointer = 0; |
1997 | 0 | } |
1998 | | |
1999 | | ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht) |
2000 | 0 | { |
2001 | 0 | uint32_t idx; |
2002 | |
|
2003 | 0 | IS_CONSISTENT(ht); |
2004 | 0 | HT_ASSERT_RC1(ht); |
2005 | |
|
2006 | 0 | if (HT_IS_PACKED(ht)) { |
2007 | 0 | zval *zv = ht->arPacked; |
2008 | |
|
2009 | 0 | for (idx = 0; idx < ht->nNumUsed; idx++, zv++) { |
2010 | 0 | if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue; |
2011 | 0 | _zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv); |
2012 | 0 | } |
2013 | 0 | } else { |
2014 | 0 | Bucket *p = ht->arData; |
2015 | |
|
2016 | 0 | for (idx = 0; idx < ht->nNumUsed; idx++, p++) { |
2017 | 0 | if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; |
2018 | 0 | _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p); |
2019 | 0 | } |
2020 | 0 | } |
2021 | 0 | if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) { |
2022 | 0 | pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT); |
2023 | 0 | } |
2024 | |
|
2025 | 0 | SET_INCONSISTENT(HT_DESTROYED); |
2026 | 0 | } |
2027 | | |
2028 | | ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht) |
2029 | 96.2k | { |
2030 | 96.2k | uint32_t idx; |
2031 | | |
2032 | 96.2k | IS_CONSISTENT(ht); |
2033 | 96.2k | HT_ASSERT_RC1(ht); |
2034 | | |
2035 | 96.2k | idx = ht->nNumUsed; |
2036 | 96.2k | if (HT_IS_PACKED(ht)) { |
2037 | 0 | zval *zv = ht->arPacked + ht->nNumUsed; |
2038 | |
|
2039 | 0 | while (idx > 0) { |
2040 | 0 | idx--; |
2041 | 0 | zv--; |
2042 | 0 | if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue; |
2043 | 0 | _zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv); |
2044 | 0 | } |
2045 | 96.2k | } else { |
2046 | 96.2k | Bucket *p = ht->arData + ht->nNumUsed; |
2047 | | |
2048 | 288k | while (idx > 0) { |
2049 | 192k | idx--; |
2050 | 192k | p--; |
2051 | 192k | if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; |
2052 | 192k | _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p); |
2053 | 192k | } |
2054 | 96.2k | } |
2055 | | |
2056 | 96.2k | if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) { |
2057 | 48.1k | pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT); |
2058 | 48.1k | } |
2059 | | |
2060 | 96.2k | SET_INCONSISTENT(HT_DESTROYED); |
2061 | 96.2k | } |
2062 | | |
2063 | | /* This is used to iterate over the elements of a hashtable and selectively
2064 | | * delete entries from it. apply_func() receives each element's data and decides
2065 | | * whether the entry should be deleted or whether iteration should stop. The
2066 | | * following three return codes are possible:
2067 | | * ZEND_HASH_APPLY_KEEP - keep the element and continue
2068 | | * ZEND_HASH_APPLY_STOP - stop iteration
2069 | | * ZEND_HASH_APPLY_REMOVE - delete the element; may be combined with ZEND_HASH_APPLY_STOP
2070 | | */
2071 | | |
2072 | | ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func) |
2073 | 2 | { |
2074 | 2 | uint32_t idx; |
2075 | 2 | int result; |
2076 | | |
2077 | 2 | IS_CONSISTENT(ht); |
2078 | 2 | if (HT_IS_PACKED(ht)) { |
2079 | 0 | for (idx = 0; idx < ht->nNumUsed; idx++) { |
2080 | 0 | zval *zv = ht->arPacked + idx; |
2081 | |
|
2082 | 0 | if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue; |
2083 | 0 | result = apply_func(zv); |
2084 | |
|
2085 | 0 | if (result & ZEND_HASH_APPLY_REMOVE) { |
2086 | 0 | HT_ASSERT_RC1(ht); |
2087 | 0 | _zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv); |
2088 | 0 | } |
2089 | 0 | if (result & ZEND_HASH_APPLY_STOP) { |
2090 | 0 | break; |
2091 | 0 | } |
2092 | 0 | } |
2093 | 2 | } else { |
2094 | 28 | for (idx = 0; idx < ht->nNumUsed; idx++) { |
2095 | 26 | Bucket *p = ht->arData + idx; |
2096 | | |
2097 | 26 | if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; |
2098 | 26 | result = apply_func(&p->val); |
2099 | | |
2100 | 26 | if (result & ZEND_HASH_APPLY_REMOVE) { |
2101 | 0 | HT_ASSERT_RC1(ht); |
2102 | 0 | _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p); |
2103 | 0 | } |
2104 | 26 | if (result & ZEND_HASH_APPLY_STOP) { |
2105 | 0 | break; |
2106 | 0 | } |
2107 | 26 | } |
2108 | 2 | } |
2109 | 2 | } |
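
A sketch of a callback obeying the apply_func() contract described in the comment above zend_hash_apply(): drop every IS_NULL element and stop early at the first IS_FALSE one. The callback name is illustrative, not part of this file:

    static int drop_nulls(zval *zv)
    {
        if (Z_TYPE_P(zv) == IS_NULL) {
            return ZEND_HASH_APPLY_REMOVE;   /* element is deleted in place */
        }
        if (Z_TYPE_P(zv) == IS_FALSE) {
            return ZEND_HASH_APPLY_STOP;     /* iteration ends after this element */
        }
        return ZEND_HASH_APPLY_KEEP;
    }

    zend_hash_apply(ht, drop_nulls);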
2110 | | |
2111 | | |
2112 | | ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument) |
2113 | 0 | { |
2114 | 0 | uint32_t idx; |
2115 | 0 | int result; |
2116 | |
|
2117 | 0 | IS_CONSISTENT(ht); |
2118 | 0 | if (HT_IS_PACKED(ht)) { |
2119 | 0 | for (idx = 0; idx < ht->nNumUsed; idx++) { |
2120 | 0 | zval *zv = ht->arPacked + idx; |
2121 | 0 | if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue; |
2122 | 0 | result = apply_func(zv, argument); |
2123 | |
|
2124 | 0 | if (result & ZEND_HASH_APPLY_REMOVE) { |
2125 | 0 | HT_ASSERT_RC1(ht); |
2126 | 0 | _zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv); |
2127 | 0 | } |
2128 | 0 | if (result & ZEND_HASH_APPLY_STOP) { |
2129 | 0 | break; |
2130 | 0 | } |
2131 | 0 | } |
2132 | 0 | } else { |
2133 | 0 | for (idx = 0; idx < ht->nNumUsed; idx++) { |
2134 | 0 | Bucket *p = ht->arData + idx; |
2135 | 0 | if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; |
2136 | 0 | result = apply_func(&p->val, argument); |
2137 | |
|
2138 | 0 | if (result & ZEND_HASH_APPLY_REMOVE) { |
2139 | 0 | HT_ASSERT_RC1(ht); |
2140 | 0 | _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p); |
2141 | 0 | } |
2142 | 0 | if (result & ZEND_HASH_APPLY_STOP) { |
2143 | 0 | break; |
2144 | 0 | } |
2145 | 0 | } |
2146 | 0 | } |
2147 | 0 | } |
2148 | | |
2149 | | |
2150 | | ZEND_API void zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...) |
2151 | 0 | { |
2152 | 0 | uint32_t idx; |
2153 | 0 | va_list args; |
2154 | 0 | zend_hash_key hash_key; |
2155 | 0 | int result; |
2156 | |
|
2157 | 0 | IS_CONSISTENT(ht); |
2158 | |
|
2159 | 0 | if (HT_IS_PACKED(ht)) { |
2160 | 0 | for (idx = 0; idx < ht->nNumUsed; idx++) { |
2161 | 0 | zval *zv = ht->arPacked + idx; |
2162 | |
|
2163 | 0 | if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue; |
2164 | 0 | va_start(args, num_args); |
2165 | 0 | hash_key.h = idx; |
2166 | 0 | hash_key.key = NULL; |
2167 | |
|
2168 | 0 | result = apply_func(zv, num_args, args, &hash_key); |
2169 | |
|
2170 | 0 | if (result & ZEND_HASH_APPLY_REMOVE) { |
2171 | 0 | HT_ASSERT_RC1(ht); |
2172 | 0 | _zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv); |
2173 | 0 | } |
2174 | 0 | if (result & ZEND_HASH_APPLY_STOP) { |
2175 | 0 | va_end(args); |
2176 | 0 | break; |
2177 | 0 | } |
2178 | 0 | va_end(args); |
2179 | 0 | } |
2180 | 0 | } else { |
2181 | 0 | for (idx = 0; idx < ht->nNumUsed; idx++) { |
2182 | 0 | Bucket *p = ht->arData + idx; |
2183 | |
|
2184 | 0 | if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; |
2185 | 0 | va_start(args, num_args); |
2186 | 0 | hash_key.h = p->h; |
2187 | 0 | hash_key.key = p->key; |
2188 | |
|
2189 | 0 | result = apply_func(&p->val, num_args, args, &hash_key); |
2190 | |
|
2191 | 0 | if (result & ZEND_HASH_APPLY_REMOVE) { |
2192 | 0 | HT_ASSERT_RC1(ht); |
2193 | 0 | _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p); |
2194 | 0 | } |
2195 | 0 | if (result & ZEND_HASH_APPLY_STOP) { |
2196 | 0 | va_end(args); |
2197 | 0 | break; |
2198 | 0 | } |
2199 | 0 | va_end(args); |
2200 | 0 | } |
2201 | 0 | } |
2202 | 0 | } |
2203 | | |
2204 | | |
2205 | | ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func) |
2206 | 48.1k | { |
2207 | 48.1k | uint32_t idx; |
2208 | 48.1k | int result; |
2209 | | |
2210 | 48.1k | IS_CONSISTENT(ht); |
2211 | | |
2212 | 48.1k | idx = ht->nNumUsed; |
2213 | 48.1k | if (HT_IS_PACKED(ht)) { |
2214 | 0 | zval *zv; |
2215 | |
|
2216 | 0 | while (idx > 0) { |
2217 | 0 | idx--; |
2218 | 0 | zv = ht->arPacked + idx; |
2219 | 0 | if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue; |
2220 | | |
2221 | 0 | result = apply_func(zv); |
2222 | |
|
2223 | 0 | if (result & ZEND_HASH_APPLY_REMOVE) { |
2224 | 0 | HT_ASSERT_RC1(ht); |
2225 | 0 | _zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv); |
2226 | 0 | } |
2227 | 0 | if (result & ZEND_HASH_APPLY_STOP) { |
2228 | 0 | break; |
2229 | 0 | } |
2230 | 0 | } |
2231 | 48.1k | } else { |
2232 | 48.1k | Bucket *p; |
2233 | | |
2234 | 240k | while (idx > 0) { |
2235 | 192k | idx--; |
2236 | 192k | p = ht->arData + idx; |
2237 | 192k | if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; |
2238 | | |
2239 | 192k | result = apply_func(&p->val); |
2240 | | |
2241 | 192k | if (result & ZEND_HASH_APPLY_REMOVE) { |
2242 | 0 | HT_ASSERT_RC1(ht); |
2243 | 0 | _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p); |
2244 | 0 | } |
2245 | 192k | if (result & ZEND_HASH_APPLY_STOP) { |
2246 | 0 | break; |
2247 | 0 | } |
2248 | 192k | } |
2249 | 48.1k | } |
2250 | 48.1k | } |
2251 | | |
2252 | | |
2253 | | ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, const HashTable *source, copy_ctor_func_t pCopyConstructor) |
2254 | 0 | { |
2255 | 0 | uint32_t idx; |
2256 | 0 | zval *new_entry, *data; |
2257 | |
|
2258 | 0 | IS_CONSISTENT(source); |
2259 | 0 | IS_CONSISTENT(target); |
2260 | 0 | HT_ASSERT_RC1(target); |
2261 | |
|
2262 | 0 | if (HT_IS_PACKED(source)) { |
2263 | 0 | for (idx = 0; idx < source->nNumUsed; idx++) { |
2264 | 0 | zval *zv = source->arPacked + idx; |
2265 | 0 | if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue; |
2266 | | |
2267 | 0 | new_entry = zend_hash_index_update(target, idx, zv); |
2268 | 0 | if (pCopyConstructor) { |
2269 | 0 | pCopyConstructor(new_entry); |
2270 | 0 | } |
2271 | 0 | } |
2272 | 0 | return; |
2273 | 0 | } |
2274 | | |
2275 | 0 | for (idx = 0; idx < source->nNumUsed; idx++) { |
2276 | 0 | Bucket *p = source->arData + idx; |
2277 | |
|
2278 | 0 | if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; |
2279 | | |
2280 | | /* INDIRECT element may point to UNDEF-ined slots */ |
2281 | 0 | data = &p->val; |
2282 | 0 | if (Z_TYPE_P(data) == IS_INDIRECT) { |
2283 | 0 | data = Z_INDIRECT_P(data); |
2284 | 0 | if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) { |
2285 | 0 | continue; |
2286 | 0 | } |
2287 | 0 | } |
2288 | 0 | if (p->key) { |
2289 | 0 | new_entry = zend_hash_update(target, p->key, data); |
2290 | 0 | } else { |
2291 | 0 | new_entry = zend_hash_index_update(target, p->h, data); |
2292 | 0 | } |
2293 | 0 | if (pCopyConstructor) { |
2294 | 0 | pCopyConstructor(new_entry); |
2295 | 0 | } |
2296 | 0 | } |
2297 | 0 | } |
2298 | | |
2299 | | |
2300 | | static zend_always_inline bool zend_array_dup_value(const HashTable *source, zval *data, zval *dest, bool packed, bool with_holes) |
2301 | 0 | { |
2302 | 0 | if (with_holes) { |
2303 | 0 | if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) { |
2304 | 0 | data = Z_INDIRECT_P(data); |
2305 | 0 | } |
2306 | 0 | if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) { |
2307 | 0 | return 0; |
2308 | 0 | } |
2309 | 0 | } else if (!packed) { |
2310 | | /* INDIRECT element may point to UNDEF-ined slots */ |
2311 | 0 | if (Z_TYPE_INFO_P(data) == IS_INDIRECT) { |
2312 | 0 | data = Z_INDIRECT_P(data); |
2313 | 0 | if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) { |
2314 | 0 | return 0; |
2315 | 0 | } |
2316 | 0 | } |
2317 | 0 | } |
2318 | | |
2319 | 0 | do { |
2320 | 0 | if (Z_OPT_REFCOUNTED_P(data)) { |
2321 | 0 | if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 && |
2322 | 0 | (Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY || |
2323 | 0 | Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) { |
2324 | 0 | data = Z_REFVAL_P(data); |
2325 | 0 | if (!Z_OPT_REFCOUNTED_P(data)) { |
2326 | 0 | break; |
2327 | 0 | } |
2328 | 0 | } |
2329 | 0 | Z_ADDREF_P(data); |
2330 | 0 | } |
2331 | 0 | } while (0); |
2332 | 0 | ZVAL_COPY_VALUE(dest, data); |
2333 | |
|
2334 | 0 | return 1; |
2335 | 0 | } |
2336 | | |
2337 | | static zend_always_inline bool zend_array_dup_element(const HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, bool packed, bool static_keys, bool with_holes) |
2338 | 0 | { |
2339 | 0 | if (!zend_array_dup_value(source, &p->val, &q->val, packed, with_holes)) { |
2340 | 0 | return 0; |
2341 | 0 | } |
2342 | | |
2343 | 0 | if (!packed) { |
2344 | 0 | uint32_t nIndex; |
2345 | |
|
2346 | 0 | q->h = p->h; |
2347 | 0 | q->key = p->key; |
2348 | 0 | if (!static_keys && q->key) { |
2349 | 0 | zend_string_addref(q->key); |
2350 | 0 | } |
2351 | |
|
2352 | 0 | nIndex = q->h | target->nTableMask; |
2353 | 0 | Z_NEXT(q->val) = HT_HASH(target, nIndex); |
2354 | 0 | HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx); |
2355 | 0 | } |
2356 | 0 | return 1; |
2357 | 0 | } |
2358 | | |
2359 | | // We need to duplicate iterators to be able to search through all copy-on-write copies to find the actually iterated HashTable and position back |
2360 | 0 | static void zend_array_dup_ht_iterators(const HashTable *source, HashTable *target) { |
2361 | 0 | uint32_t iter_index = 0; |
2362 | 0 | uint32_t end_index = EG(ht_iterators_used); |
2363 | |
|
2364 | 0 | while (iter_index != end_index) { |
2365 | 0 | HashTableIterator *iter = &EG(ht_iterators)[iter_index]; |
2366 | 0 | if (iter->ht == source) { |
2367 | 0 | uint32_t copy_idx = zend_hash_iterator_add(target, iter->pos); |
2368 | | /* Refetch iter because the memory may be reallocated. */ |
2369 | 0 | iter = &EG(ht_iterators)[iter_index]; |
2370 | 0 | HashTableIterator *copy_iter = EG(ht_iterators) + copy_idx; |
2371 | 0 | copy_iter->next_copy = iter->next_copy; |
2372 | 0 | iter->next_copy = copy_idx; |
2373 | 0 | } |
2374 | 0 | iter_index++; |
2375 | 0 | } |
2376 | 0 | } |
2377 | | |
2378 | | static zend_always_inline void zend_array_dup_packed_elements(const HashTable *source, HashTable *target, bool with_holes) |
2379 | 0 | { |
2380 | 0 | zval *p = source->arPacked; |
2381 | 0 | zval *q = target->arPacked; |
2382 | 0 | const zval *end = p + source->nNumUsed; |
2383 | |
|
2384 | 0 | do { |
2385 | 0 | if (!zend_array_dup_value(source, p, q, true, with_holes)) { |
2386 | 0 | if (with_holes) { |
2387 | 0 | ZVAL_UNDEF(q); |
2388 | 0 | } |
2389 | 0 | } |
2390 | 0 | p++; q++; |
2391 | 0 | } while (p != end); |
2392 | |
|
2393 | 0 | if (UNEXPECTED(HT_HAS_ITERATORS(source))) { |
2394 | 0 | zend_array_dup_ht_iterators(source, target); |
2395 | 0 | } |
2396 | 0 | } |
2397 | | |
2398 | | static zend_always_inline uint32_t zend_array_dup_elements(const HashTable *source, HashTable *target, bool static_keys, bool with_holes) |
2399 | 0 | { |
2400 | 0 | uint32_t idx = 0; |
2401 | 0 | Bucket *p = source->arData; |
2402 | 0 | Bucket *q = target->arData; |
2403 | 0 | const Bucket *end = p + source->nNumUsed; |
2404 | |
|
2405 | 0 | if (UNEXPECTED(HT_HAS_ITERATORS(source))) { |
2406 | 0 | zend_array_dup_ht_iterators(source, target); |
2407 | 0 | } |
2408 | |
|
2409 | 0 | do { |
2410 | 0 | if (!zend_array_dup_element(source, target, idx, p, q, false, static_keys, with_holes)) { |
2411 | 0 | uint32_t target_idx = idx; |
2412 | |
|
2413 | 0 | idx++; p++; |
2414 | 0 | if (EXPECTED(!HT_HAS_ITERATORS(target))) { |
2415 | 0 | while (p != end) { |
2416 | 0 | if (zend_array_dup_element(source, target, target_idx, p, q, false, static_keys, with_holes)) { |
2417 | 0 | if (source->nInternalPointer == idx) { |
2418 | 0 | target->nInternalPointer = target_idx; |
2419 | 0 | } |
2420 | 0 | target_idx++; q++; |
2421 | 0 | } |
2422 | 0 | idx++; p++; |
2423 | 0 | } |
2424 | 0 | } else { |
2425 | 0 | target->nNumUsed = source->nNumUsed; |
2426 | 0 | uint32_t iter_pos = zend_hash_iterators_lower_pos(target, idx); |
2427 | |
|
2428 | 0 | while (p != end) { |
2429 | 0 | if (zend_array_dup_element(source, target, target_idx, p, q, false, static_keys, with_holes)) { |
2430 | 0 | if (source->nInternalPointer == idx) { |
2431 | 0 | target->nInternalPointer = target_idx; |
2432 | 0 | } |
2433 | 0 | if (UNEXPECTED(idx >= iter_pos)) { |
2434 | 0 | do { |
2435 | 0 | zend_hash_iterators_update(target, iter_pos, target_idx); |
2436 | 0 | iter_pos = zend_hash_iterators_lower_pos(target, iter_pos + 1); |
2437 | 0 | } while (iter_pos < idx); |
2438 | 0 | } |
2439 | 0 | target_idx++; q++; |
2440 | 0 | } |
2441 | 0 | idx++; p++; |
2442 | 0 | } |
2443 | 0 | } |
2444 | 0 | return target_idx; |
2445 | 0 | } |
2446 | 0 | idx++; p++; q++; |
2447 | 0 | } while (p != end); |
2448 | 0 | return idx; |
2449 | 0 | } |
2450 | | |
2451 | | ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(const HashTable *source) |
2452 | 0 | { |
2453 | 0 | uint32_t idx; |
2454 | 0 | HashTable *target; |
2455 | |
|
2456 | 0 | IS_CONSISTENT(source); |
2457 | |
|
2458 | 0 | ALLOC_HASHTABLE(target); |
2459 | 0 | GC_SET_REFCOUNT(target, 1); |
2460 | 0 | GC_TYPE_INFO(target) = GC_ARRAY; |
2461 | |
|
2462 | 0 | target->pDestructor = ZVAL_PTR_DTOR; |
2463 | |
|
2464 | 0 | if (source->nNumOfElements == 0) { |
2465 | 0 | HT_FLAGS(target) = HASH_FLAG_UNINITIALIZED; |
2466 | 0 | target->nTableMask = HT_MIN_MASK; |
2467 | 0 | target->nNumUsed = 0; |
2468 | 0 | target->nNumOfElements = 0; |
2469 | 0 | target->nNextFreeElement = source->nNextFreeElement; |
2470 | 0 | target->nInternalPointer = 0; |
2471 | 0 | target->nTableSize = HT_MIN_SIZE; |
2472 | 0 | HT_SET_DATA_ADDR(target, &uninitialized_bucket); |
2473 | 0 | } else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) { |
2474 | 0 | ZEND_ASSERT(!(HT_FLAGS(source) & HASH_FLAG_HAS_EMPTY_IND)); |
2475 | 0 | HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK; |
2476 | 0 | target->nTableMask = source->nTableMask; |
2477 | 0 | target->nNumUsed = source->nNumUsed; |
2478 | 0 | target->nNumOfElements = source->nNumOfElements; |
2479 | 0 | target->nNextFreeElement = source->nNextFreeElement; |
2480 | 0 | target->nTableSize = source->nTableSize; |
2481 | 0 | if (HT_IS_PACKED(source)) { |
2482 | 0 | HT_SET_DATA_ADDR(target, emalloc(HT_PACKED_SIZE(target))); |
2483 | 0 | target->nInternalPointer = source->nInternalPointer; |
2484 | 0 | memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_PACKED_USED_SIZE(source)); |
2485 | 0 | } else { |
2486 | 0 | HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target))); |
2487 | 0 | target->nInternalPointer = source->nInternalPointer; |
2488 | 0 | memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source)); |
2489 | 0 | } |
2490 | 0 | } else if (HT_IS_PACKED(source)) { |
2491 | 0 | ZEND_ASSERT(!(HT_FLAGS(source) & HASH_FLAG_HAS_EMPTY_IND)); |
2492 | 0 | HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK; |
2493 | 0 | target->nTableMask = HT_MIN_MASK; |
2494 | 0 | target->nNumUsed = source->nNumUsed; |
2495 | 0 | target->nNumOfElements = source->nNumOfElements; |
2496 | 0 | target->nNextFreeElement = source->nNextFreeElement; |
2497 | 0 | target->nTableSize = source->nTableSize; |
2498 | 0 | HT_SET_DATA_ADDR(target, emalloc(HT_PACKED_SIZE_EX(target->nTableSize, HT_MIN_MASK))); |
2499 | 0 | target->nInternalPointer = |
2500 | 0 | (source->nInternalPointer < source->nNumUsed) ? |
2501 | 0 | source->nInternalPointer : 0; |
2502 | |
|
2503 | 0 | HT_HASH_RESET_PACKED(target); |
2504 | |
|
2505 | 0 | if (HT_IS_WITHOUT_HOLES(target)) { |
2506 | 0 | zend_array_dup_packed_elements(source, target, false); |
2507 | 0 | } else { |
2508 | 0 | zend_array_dup_packed_elements(source, target, true); |
2509 | 0 | } |
2510 | 0 | } else { |
2511 | | /* Indirects are removed during duplication, remove HASH_FLAG_HAS_EMPTY_IND accordingly. */ |
2512 | 0 | HT_FLAGS(target) = HT_FLAGS(source) & (HASH_FLAG_MASK & ~HASH_FLAG_HAS_EMPTY_IND); |
2513 | 0 | target->nTableMask = source->nTableMask; |
2514 | 0 | target->nNextFreeElement = source->nNextFreeElement; |
2515 | 0 | target->nInternalPointer = |
2516 | 0 | (source->nInternalPointer < source->nNumUsed) ? |
2517 | 0 | source->nInternalPointer : 0; |
2518 | |
|
2519 | 0 | target->nTableSize = source->nTableSize; |
2520 | 0 | HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target))); |
2521 | 0 | HT_HASH_RESET(target); |
2522 | |
|
2523 | 0 | if (HT_HAS_STATIC_KEYS_ONLY(target)) { |
2524 | 0 | if (HT_IS_WITHOUT_HOLES(source)) { |
2525 | 0 | idx = zend_array_dup_elements(source, target, true, false); |
2526 | 0 | } else { |
2527 | 0 | idx = zend_array_dup_elements(source, target, true, true); |
2528 | 0 | } |
2529 | 0 | } else { |
2530 | 0 | if (HT_IS_WITHOUT_HOLES(source)) { |
2531 | 0 | idx = zend_array_dup_elements(source, target, false, false); |
2532 | 0 | } else { |
2533 | 0 | idx = zend_array_dup_elements(source, target, false, true); |
2534 | 0 | } |
2535 | 0 | } |
2536 | 0 | target->nNumUsed = idx; |
2537 | 0 | target->nNumOfElements = idx; |
2538 | 0 | } |
2539 | 0 | return target; |
2540 | 0 | } |
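
A usage sketch for zend_array_dup() above (illustrative): the duplicate shares element values by reference counting rather than deep-copying them, which is what keeps copy-on-write separation of PHP arrays cheap.

    HashTable *copy = zend_array_dup(source);   /* refcounted values are addref'd, not cloned */
    /* ... copy can now be modified without affecting source ... */
    zend_array_destroy(copy);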
2541 | | |
2542 | | ZEND_API HashTable* zend_array_to_list(const HashTable *source) |
2543 | 0 | { |
2544 | 0 | HashTable *result = _zend_new_array(zend_hash_num_elements(source)); |
2545 | 0 | zend_hash_real_init_packed(result); |
2546 | |
|
2547 | 0 | ZEND_HASH_FILL_PACKED(result) { |
2548 | 0 | zval *entry; |
2549 | |
|
2550 | 0 | ZEND_HASH_FOREACH_VAL(source, entry) { |
2551 | 0 | if (UNEXPECTED(Z_ISREF_P(entry) && Z_REFCOUNT_P(entry) == 1)) { |
2552 | 0 | entry = Z_REFVAL_P(entry); |
2553 | 0 | } |
2554 | 0 | Z_TRY_ADDREF_P(entry); |
2555 | 0 | ZEND_HASH_FILL_ADD(entry); |
2556 | 0 | } ZEND_HASH_FOREACH_END(); |
2557 | 0 | } ZEND_HASH_FILL_END(); |
2558 | |
|
2559 | 0 | return result; |
2560 | 0 | } |
2561 | | |
2562 | | |
2563 | | ZEND_API void ZEND_FASTCALL zend_hash_merge(HashTable *target, const HashTable *source, copy_ctor_func_t pCopyConstructor, bool overwrite) |
2564 | 0 | { |
2565 | 0 | uint32_t idx; |
2566 | 0 | Bucket *p; |
2567 | 0 | zval *t, *s; |
2568 | |
|
2569 | 0 | IS_CONSISTENT(source); |
2570 | 0 | IS_CONSISTENT(target); |
2571 | 0 | HT_ASSERT_RC1(target); |
2572 | |
|
2573 | 0 | if (overwrite) { |
2574 | 0 | if (HT_IS_PACKED(source)) { |
2575 | 0 | for (idx = 0; idx < source->nNumUsed; idx++) { |
2576 | 0 | s = source->arPacked + idx; |
2577 | 0 | if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) { |
2578 | 0 | continue; |
2579 | 0 | } |
2580 | 0 | t = zend_hash_index_update(target, idx, s); |
2581 | 0 | if (pCopyConstructor) { |
2582 | 0 | pCopyConstructor(t); |
2583 | 0 | } |
2584 | 0 | } |
2585 | 0 | return; |
2586 | 0 | } |
2587 | | |
2588 | 0 | for (idx = 0; idx < source->nNumUsed; idx++) { |
2589 | 0 | p = source->arData + idx; |
2590 | 0 | s = &p->val; |
2591 | 0 | if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) { |
2592 | 0 | s = Z_INDIRECT_P(s); |
2593 | 0 | } |
2594 | 0 | if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) { |
2595 | 0 | continue; |
2596 | 0 | } |
2597 | 0 | if (p->key) { |
2598 | 0 | t = _zend_hash_add_or_update_i(target, p->key, s, HASH_UPDATE | HASH_UPDATE_INDIRECT); |
2599 | 0 | if (pCopyConstructor) { |
2600 | 0 | pCopyConstructor(t); |
2601 | 0 | } |
2602 | 0 | } else { |
2603 | 0 | t = zend_hash_index_update(target, p->h, s); |
2604 | 0 | if (pCopyConstructor) { |
2605 | 0 | pCopyConstructor(t); |
2606 | 0 | } |
2607 | 0 | } |
2608 | 0 | } |
2609 | 0 | } else { |
2610 | 0 | if (HT_IS_PACKED(source)) { |
2611 | 0 | for (idx = 0; idx < source->nNumUsed; idx++) { |
2612 | 0 | s = source->arPacked + idx; |
2613 | 0 | if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) { |
2614 | 0 | continue; |
2615 | 0 | } |
2616 | 0 | t = zend_hash_index_add(target, idx, s); |
2617 | 0 | if (t && pCopyConstructor) { |
2618 | 0 | pCopyConstructor(t); |
2619 | 0 | } |
2620 | 0 | } |
2621 | 0 | return; |
2622 | 0 | } |
2623 | | |
2624 | 0 | for (idx = 0; idx < source->nNumUsed; idx++) { |
2625 | 0 | p = source->arData + idx; |
2626 | 0 | s = &p->val; |
2627 | 0 | if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) { |
2628 | 0 | s = Z_INDIRECT_P(s); |
2629 | 0 | } |
2630 | 0 | if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) { |
2631 | 0 | continue; |
2632 | 0 | } |
2633 | 0 | if (p->key) { |
2634 | 0 | t = _zend_hash_add_or_update_i(target, p->key, s, HASH_ADD | HASH_UPDATE_INDIRECT); |
2635 | 0 | if (t && pCopyConstructor) { |
2636 | 0 | pCopyConstructor(t); |
2637 | 0 | } |
2638 | 0 | } else { |
2639 | 0 | t = zend_hash_index_add(target, p->h, s); |
2640 | 0 | if (t && pCopyConstructor) { |
2641 | 0 | pCopyConstructor(t); |
2642 | 0 | } |
2643 | 0 | } |
2644 | 0 | } |
2645 | 0 | } |
2646 | 0 | } |
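
A sketch of the overwrite flag of zend_hash_merge() above (illustrative; options and defaults are hypothetical tables of zvals, and zval_add_ref is the usual refcounting copy constructor): with overwrite == false, keys already present in the target win, so merging defaults into user options leaves the user's values intact.

    /* keep user-supplied values, only add the missing defaults */
    zend_hash_merge(options, defaults, zval_add_ref, /* overwrite */ false);

    /* force every value from defaults into options, replacing existing entries */
    zend_hash_merge(options, defaults, zval_add_ref, /* overwrite */ true);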
2647 | | |
2648 | | |
2649 | | static bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, zend_ulong h, zend_string *key, void *pParam, merge_checker_func_t merge_checker_func) |
2650 | 0 | { |
2651 | 0 | zend_hash_key hash_key; |
2652 | |
|
2653 | 0 | hash_key.h = h; |
2654 | 0 | hash_key.key = key; |
2655 | 0 | return merge_checker_func(target, source_data, &hash_key, pParam); |
2656 | 0 | } |
2657 | | |
2658 | | |
2659 | | ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, const HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam) |
2660 | 0 | { |
2661 | 0 | uint32_t idx; |
2662 | 0 | Bucket *p; |
2663 | 0 | zval *t; |
2664 | |
|
2665 | 0 | IS_CONSISTENT(source); |
2666 | 0 | IS_CONSISTENT(target); |
2667 | 0 | HT_ASSERT_RC1(target); |
2668 | |
|
2669 | 0 | ZEND_ASSERT(!HT_IS_PACKED(source)); |
2670 | 0 | for (idx = 0; idx < source->nNumUsed; idx++) { |
2671 | 0 | p = source->arData + idx; |
2672 | 0 | if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; |
2673 | 0 | if (zend_hash_replace_checker_wrapper(target, &p->val, p->h, p->key, pParam, pMergeSource)) { |
2674 | 0 | t = zend_hash_update(target, p->key, &p->val); |
2675 | 0 | if (pCopyConstructor) { |
2676 | 0 | pCopyConstructor(t); |
2677 | 0 | } |
2678 | 0 | } |
2679 | 0 | } |
2680 | 0 | } |
2681 | | |
2682 | | |
2683 | | /* Returns the hash table data if found and NULL if not. */ |
2684 | | ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key) |
2685 | 1.87M | { |
2686 | 1.87M | Bucket *p; |
2687 | | |
2688 | 1.87M | IS_CONSISTENT(ht); |
2689 | | |
2690 | 1.87M | (void)zend_string_hash_val(key); |
2691 | 1.87M | p = zend_hash_find_bucket(ht, key); |
2692 | 1.87M | return p ? &p->val : NULL; |
2693 | 1.87M | } |
2694 | | |
2695 | | ZEND_API zval* ZEND_FASTCALL zend_hash_find_known_hash(const HashTable *ht, const zend_string *key) |
2696 | 3.77k | { |
2697 | 3.77k | Bucket *p; |
2698 | | |
2699 | 3.77k | IS_CONSISTENT(ht); |
2700 | | |
2701 | 3.77k | p = zend_hash_find_bucket(ht, key); |
2702 | 3.77k | return p ? &p->val : NULL; |
2703 | 3.77k | } |
2704 | | |
2705 | | ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len) |
2706 | 2.97M | { |
2707 | 2.97M | zend_ulong h; |
2708 | 2.97M | Bucket *p; |
2709 | | |
2710 | 2.97M | IS_CONSISTENT(ht); |
2711 | | |
2712 | 2.97M | h = zend_inline_hash_func(str, len); |
2713 | 2.97M | p = zend_hash_str_find_bucket(ht, str, len, h); |
2714 | 2.97M | return p ? &p->val : NULL; |
2715 | 2.97M | } |
2716 | | |
2717 | | ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h) |
2718 | 31.6k | { |
2719 | 31.6k | Bucket *p; |
2720 | | |
2721 | 31.6k | IS_CONSISTENT(ht); |
2722 | | |
2723 | 31.6k | if (HT_IS_PACKED(ht)) { |
2724 | 122 | if (h < ht->nNumUsed) { |
2725 | 30 | zval *zv = ht->arPacked + h; |
2726 | | |
2727 | 30 | if (Z_TYPE_P(zv) != IS_UNDEF) { |
2728 | 28 | return zv; |
2729 | 28 | } |
2730 | 30 | } |
2731 | 94 | return NULL; |
2732 | 122 | } |
2733 | | |
2734 | 31.5k | p = zend_hash_index_find_bucket(ht, h); |
2735 | 31.5k | return p ? &p->val : NULL; |
2736 | 31.6k | } |
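
A lookup sketch for the find functions above (illustrative; assumes the stored values are ordinary zvals). Each of them returns a pointer into the table, or NULL when the key is absent:

    zval *v;

    v = zend_hash_str_find(ht, "name", sizeof("name") - 1);   /* by C-string key */
    if (v != NULL && Z_TYPE_P(v) == IS_STRING) {
        /* Z_STRVAL_P(v) is valid as long as the entry stays in the table */
    }

    v = zend_hash_index_find(ht, 3);                          /* by integer key / packed offset */
    if (v == NULL) {
        /* index 3 is not present */
    }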
2737 | | |
2738 | | ZEND_API zval* ZEND_FASTCALL _zend_hash_index_find(const HashTable *ht, zend_ulong h) |
2739 | 0 | { |
2740 | 0 | Bucket *p; |
2741 | |
|
2742 | 0 | IS_CONSISTENT(ht); |
2743 | 0 | ZEND_ASSERT(!HT_IS_PACKED(ht)); |
2744 | |
|
2745 | 0 | p = zend_hash_index_find_bucket(ht, h); |
2746 | 0 | return p ? &p->val : NULL; |
2747 | 0 | } |
2748 | | |
2749 | | ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(const HashTable *ht, HashPosition *pos) |
2750 | 0 | { |
2751 | 0 | IS_CONSISTENT(ht); |
2752 | 0 | HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1); |
2753 | 0 | *pos = _zend_hash_get_valid_pos(ht, 0); |
2754 | 0 | } |
2755 | | |
2756 | | |
2757 | | /* This function could be greatly optimized by remembering
2758 | | * the end of the list.
2759 | | */
2760 | | ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(const HashTable *ht, HashPosition *pos) |
2761 | 0 | { |
2762 | 0 | uint32_t idx; |
2763 | |
|
2764 | 0 | IS_CONSISTENT(ht); |
2765 | 0 | HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1); |
2766 | |
|
2767 | 0 | idx = ht->nNumUsed; |
2768 | 0 | if (HT_IS_PACKED(ht)) { |
2769 | 0 | while (idx > 0) { |
2770 | 0 | idx--; |
2771 | 0 | if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) { |
2772 | 0 | *pos = idx; |
2773 | 0 | return; |
2774 | 0 | } |
2775 | 0 | } |
2776 | 0 | } else { |
2777 | 0 | while (idx > 0) { |
2778 | 0 | idx--; |
2779 | 0 | if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) { |
2780 | 0 | *pos = idx; |
2781 | 0 | return; |
2782 | 0 | } |
2783 | 0 | } |
2784 | 0 | } |
2785 | 0 | *pos = ht->nNumUsed; |
2786 | 0 | } |
2787 | | |
2788 | | |
2789 | | ZEND_API zend_result ZEND_FASTCALL zend_hash_move_forward_ex(const HashTable *ht, HashPosition *pos) |
2790 | 0 | { |
2791 | 0 | uint32_t idx; |
2792 | |
|
2793 | 0 | IS_CONSISTENT(ht); |
2794 | 0 | HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1); |
2795 | |
|
2796 | 0 | idx = _zend_hash_get_valid_pos(ht, *pos); |
2797 | 0 | if (idx < ht->nNumUsed) { |
2798 | 0 | if (HT_IS_PACKED(ht)) { |
2799 | 0 | while (1) { |
2800 | 0 | idx++; |
2801 | 0 | if (idx >= ht->nNumUsed) { |
2802 | 0 | *pos = ht->nNumUsed; |
2803 | 0 | return SUCCESS; |
2804 | 0 | } |
2805 | 0 | if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) { |
2806 | 0 | *pos = idx; |
2807 | 0 | return SUCCESS; |
2808 | 0 | } |
2809 | 0 | } |
2810 | 0 | } else { |
2811 | 0 | while (1) { |
2812 | 0 | idx++; |
2813 | 0 | if (idx >= ht->nNumUsed) { |
2814 | 0 | *pos = ht->nNumUsed; |
2815 | 0 | return SUCCESS; |
2816 | 0 | } |
2817 | 0 | if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) { |
2818 | 0 | *pos = idx; |
2819 | 0 | return SUCCESS; |
2820 | 0 | } |
2821 | 0 | } |
2822 | 0 | } |
2823 | 0 | } else { |
2824 | 0 | return FAILURE; |
2825 | 0 | } |
2826 | 0 | } |
2827 | | |
2828 | | ZEND_API zend_result ZEND_FASTCALL zend_hash_move_backwards_ex(const HashTable *ht, HashPosition *pos) |
2829 | 0 | { |
2830 | 0 | uint32_t idx = *pos; |
2831 | |
|
2832 | 0 | IS_CONSISTENT(ht); |
2833 | 0 | HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1); |
2834 | |
|
2835 | 0 | if (idx < ht->nNumUsed) { |
2836 | 0 | if (HT_IS_PACKED(ht)) { |
2837 | 0 | while (idx > 0) { |
2838 | 0 | idx--; |
2839 | 0 | if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) { |
2840 | 0 | *pos = idx; |
2841 | 0 | return SUCCESS; |
2842 | 0 | } |
2843 | 0 | } |
2844 | 0 | } else { |
2845 | 0 | while (idx > 0) { |
2846 | 0 | idx--; |
2847 | 0 | if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) { |
2848 | 0 | *pos = idx; |
2849 | 0 | return SUCCESS; |
2850 | 0 | } |
2851 | 0 | } |
2852 | 0 | } |
2853 | 0 | *pos = ht->nNumUsed; |
2854 | 0 | return SUCCESS; |
2855 | 0 | } else { |
2856 | 0 | return FAILURE; |
2857 | 0 | } |
2858 | 0 | } |
2859 | | |
2860 | | |
2861 | | ZEND_API zend_hash_key_type ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, const HashPosition *pos) |
2862 | 0 | { |
2863 | 0 | uint32_t idx; |
2864 | 0 | Bucket *p; |
2865 | |
|
2866 | 0 | IS_CONSISTENT(ht); |
2867 | 0 | idx = _zend_hash_get_valid_pos(ht, *pos); |
2868 | 0 | if (idx < ht->nNumUsed) { |
2869 | 0 | if (HT_IS_PACKED(ht)) { |
2870 | 0 | *num_index = idx; |
2871 | 0 | return HASH_KEY_IS_LONG; |
2872 | 0 | } |
2873 | 0 | p = ht->arData + idx; |
2874 | 0 | if (p->key) { |
2875 | 0 | *str_index = p->key; |
2876 | 0 | return HASH_KEY_IS_STRING; |
2877 | 0 | } else { |
2878 | 0 | *num_index = p->h; |
2879 | 0 | return HASH_KEY_IS_LONG; |
2880 | 0 | } |
2881 | 0 | } |
2882 | 0 | return HASH_KEY_NON_EXISTENT; |
2883 | 0 | } |
2884 | | |
2885 | | ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, const HashPosition *pos) |
2886 | 0 | { |
2887 | 0 | uint32_t idx; |
2888 | 0 | Bucket *p; |
2889 | |
|
2890 | 0 | IS_CONSISTENT(ht); |
2891 | 0 | idx = _zend_hash_get_valid_pos(ht, *pos); |
2892 | 0 | if (idx >= ht->nNumUsed) { |
2893 | 0 | ZVAL_NULL(key); |
2894 | 0 | } else { |
2895 | 0 | if (HT_IS_PACKED(ht)) { |
2896 | 0 | ZVAL_LONG(key, idx); |
2897 | 0 | return; |
2898 | 0 | } |
2899 | 0 | p = ht->arData + idx; |
2900 | 0 | if (p->key) { |
2901 | 0 | ZVAL_STR_COPY(key, p->key); |
2902 | 0 | } else { |
2903 | 0 | ZVAL_LONG(key, p->h); |
2904 | 0 | } |
2905 | 0 | } |
2906 | 0 | } |
2907 | | |
2908 | | ZEND_API zend_hash_key_type ZEND_FASTCALL zend_hash_get_current_key_type_ex(const HashTable *ht, const HashPosition *pos) |
2909 | 0 | { |
2910 | 0 | uint32_t idx; |
2911 | 0 | Bucket *p; |
2912 | |
|
2913 | 0 | IS_CONSISTENT(ht); |
2914 | 0 | idx = _zend_hash_get_valid_pos(ht, *pos); |
2915 | 0 | if (idx < ht->nNumUsed) { |
2916 | 0 | if (HT_IS_PACKED(ht)) { |
2917 | 0 | return HASH_KEY_IS_LONG; |
2918 | 0 | } |
2919 | 0 | p = ht->arData + idx; |
2920 | 0 | if (p->key) { |
2921 | 0 | return HASH_KEY_IS_STRING; |
2922 | 0 | } else { |
2923 | 0 | return HASH_KEY_IS_LONG; |
2924 | 0 | } |
2925 | 0 | } |
2926 | 0 | return HASH_KEY_NON_EXISTENT; |
2927 | 0 | } |
2928 | | |
2929 | | |
2930 | | ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(const HashTable *ht, const HashPosition *pos) |
2931 | 0 | { |
2932 | 0 | uint32_t idx; |
2933 | 0 | Bucket *p; |
2934 | |
|
2935 | 0 | IS_CONSISTENT(ht); |
2936 | 0 | idx = _zend_hash_get_valid_pos(ht, *pos); |
2937 | 0 | if (idx < ht->nNumUsed) { |
2938 | 0 | if (HT_IS_PACKED(ht)) { |
2939 | 0 | return &ht->arPacked[idx]; |
2940 | 0 | } |
2941 | 0 | p = ht->arData + idx; |
2942 | 0 | return &p->val; |
2943 | 0 | } else { |
2944 | 0 | return NULL; |
2945 | 0 | } |
2946 | 0 | } |
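
Taken together, the position-based functions above form a manual iteration protocol: reset, read data and key, advance. A sketch of a complete walk (illustrative; assumes the table is not modified while iterating):

    HashPosition pos;
    zval *val;
    zend_string *skey;
    zend_ulong nkey;

    zend_hash_internal_pointer_reset_ex(ht, &pos);
    while ((val = zend_hash_get_current_data_ex(ht, &pos)) != NULL) {
        if (zend_hash_get_current_key_ex(ht, &skey, &nkey, &pos) == HASH_KEY_IS_STRING) {
            /* string-keyed element: skey is valid */
        } else {
            /* integer-keyed element: nkey is valid */
        }
        zend_hash_move_forward_ex(ht, &pos);
    }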
2947 | | |
2948 | | ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q) |
2949 | 0 | { |
2950 | 0 | zval val; |
2951 | 0 | zend_ulong h; |
2952 | 0 | zend_string *key; |
2953 | |
|
2954 | 0 | val = p->val; |
2955 | 0 | h = p->h; |
2956 | 0 | key = p->key; |
2957 | |
|
2958 | 0 | p->val = q->val; |
2959 | 0 | p->h = q->h; |
2960 | 0 | p->key = q->key; |
2961 | |
|
2962 | 0 | q->val = val; |
2963 | 0 | q->h = h; |
2964 | 0 | q->key = key; |
2965 | 0 | } |
2966 | | |
2967 | | ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q) |
2968 | 0 | { |
2969 | 0 | zval val; |
2970 | |
|
2971 | 0 | val = p->val; |
2972 | 0 | p->val = q->val; |
2973 | 0 | q->val = val; |
2974 | 0 | } |
2975 | | |
2976 | | ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q) |
2977 | 0 | { |
2978 | 0 | zval val; |
2979 | 0 | zend_ulong h; |
2980 | |
|
2981 | 0 | val = p->val; |
2982 | 0 | h = p->h; |
2983 | |
|
2984 | 0 | p->val = q->val; |
2985 | 0 | p->h = q->h; |
2986 | |
|
2987 | 0 | q->val = val; |
2988 | 0 | q->h = h; |
2989 | 0 | } |
2990 | | |
2991 | | static void zend_hash_sort_internal(HashTable *ht, sort_func_t sort, bucket_compare_func_t compar, bool renumber) |
2992 | 2 | { |
2993 | 2 | Bucket *p; |
2994 | 2 | uint32_t i, j; |
2995 | | |
2996 | 2 | IS_CONSISTENT(ht); |
2997 | | |
2998 | 2 | if (!(ht->nNumOfElements>1) && !(renumber && ht->nNumOfElements>0)) { |
2999 | | /* Doesn't require sorting */ |
3000 | 0 | return; |
3001 | 0 | } |
3002 | | |
3003 | 2 | if (HT_IS_PACKED(ht)) { |
3004 | 0 | zend_hash_packed_to_hash(ht); // TODO: ??? |
3005 | 0 | } |
3006 | | |
3007 | 2 | if (HT_IS_WITHOUT_HOLES(ht)) { |
3008 | | /* Store original order of elements in extra space to allow stable sorting. */ |
3009 | 28 | for (i = 0; i < ht->nNumUsed; i++) { |
3010 | 26 | Z_EXTRA(ht->arData[i].val) = i; |
3011 | 26 | } |
3012 | 2 | } else { |
3013 | | /* Remove holes and store original order. */ |
3014 | 0 | for (j = 0, i = 0; j < ht->nNumUsed; j++) { |
3015 | 0 | p = ht->arData + j; |
3016 | 0 | if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; |
3017 | 0 | if (i != j) { |
3018 | 0 | ht->arData[i] = *p; |
3019 | 0 | } |
3020 | 0 | Z_EXTRA(ht->arData[i].val) = i; |
3021 | 0 | i++; |
3022 | 0 | } |
3023 | 0 | ht->nNumUsed = i; |
3024 | 0 | } |
3025 | | |
3026 | 2 | if (!HT_IS_PACKED(ht)) { |
3027 | | /* We broke the hash collision chains by overwriting Z_NEXT() with Z_EXTRA().
3028 | | * Reset the hash header table as well, to avoid possibly inconsistent
3029 | | * access on recursive data structures.
3030 | | *
3031 | | * See Zend/tests/bug63882_2.phpt
3032 | | */
3033 | 2 | HT_HASH_RESET(ht); |
3034 | 2 | } |
3035 | | |
3036 | 2 | sort((void *)ht->arData, ht->nNumUsed, sizeof(Bucket), (compare_func_t) compar, |
3037 | 2 | (swap_func_t)(renumber? zend_hash_bucket_renum_swap : |
3038 | 2 | (HT_IS_PACKED(ht) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap))); |
3039 | | |
3040 | 2 | ht->nInternalPointer = 0; |
3041 | | |
3042 | 2 | if (renumber) { |
3043 | 0 | for (j = 0; j < i; j++) { |
3044 | 0 | p = ht->arData + j; |
3045 | 0 | p->h = j; |
3046 | 0 | if (p->key) { |
3047 | 0 | zend_string_release(p->key); |
3048 | 0 | p->key = NULL; |
3049 | 0 | } |
3050 | 0 | } |
3051 | |
|
3052 | 0 | ht->nNextFreeElement = i; |
3053 | 0 | } |
3054 | 2 | if (HT_IS_PACKED(ht)) { |
3055 | 0 | if (!renumber) { |
3056 | 0 | zend_hash_packed_to_hash(ht); |
3057 | 0 | } |
3058 | 2 | } else { |
3059 | 2 | if (renumber) { |
3060 | 0 | void *new_data, *old_data = HT_GET_DATA_ADDR(ht); |
3061 | 0 | Bucket *old_buckets = ht->arData; |
3062 | 0 | zval *zv; |
3063 | |
|
3064 | 0 | new_data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)); |
3065 | 0 | HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS; |
3066 | 0 | ht->nTableMask = HT_MIN_MASK; |
3067 | 0 | HT_SET_DATA_ADDR(ht, new_data); |
3068 | 0 | p = old_buckets; |
3069 | 0 | zv = ht->arPacked; |
3070 | 0 | for (i = 0; i < ht->nTableSize; i++) { |
3071 | 0 | ZVAL_COPY_VALUE(zv, &p->val); |
3072 | 0 | zv++; |
3073 | 0 | p++; |
3074 | 0 | } |
3075 | 0 | pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT); |
3076 | 0 | HT_HASH_RESET_PACKED(ht); |
3077 | 2 | } else { |
3078 | 2 | zend_hash_rehash(ht); |
3079 | 2 | } |
3080 | 2 | } |
3081 | 2 | } |
3082 | | |
3083 | | ZEND_API void ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, bucket_compare_func_t compar, bool renumber) |
3084 | 2 | { |
3085 | 2 | HT_ASSERT_RC1(ht); |
3086 | 2 | zend_hash_sort_internal(ht, sort, compar, renumber); |
3087 | 2 | } |
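/* --- Illustrative sketch (editor's addition, not part of zend_hash.c) ---
 * How calling code might drive zend_hash_sort_ex(): sort a table of IS_LONG
 * values ascending and renumber the keys 0..n-1.  zend_sort() from zend_sort.h
 * is assumed as the sort_func_t (the usual routine behind the zend_hash_sort()
 * convenience macro); compare_long_buckets() and sort_long_table() are
 * hypothetical helpers written only for this sketch.  Note that
 * zend_hash_sort_ex() asserts the caller holds the only reference
 * (HT_ASSERT_RC1); shared arrays should go through zend_array_sort_ex() below. */
#include "zend_sort.h"

static int compare_long_buckets(Bucket *a, Bucket *b)
{
	zend_long la = Z_LVAL(a->val), lb = Z_LVAL(b->val);
	return (la > lb) - (la < lb);   /* -1, 0 or 1: ascending order */
}

static void sort_long_table(HashTable *ht)
{
	/* renumber=true discards the existing keys and reassigns 0..n-1 */
	zend_hash_sort_ex(ht, zend_sort, compare_long_buckets, /* renumber */ true);
}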
3088 | | |
3089 | | ZEND_API void ZEND_FASTCALL zend_array_sort_ex(HashTable *ht, sort_func_t sort, bucket_compare_func_t compar, bool renumber) |
3090 | 0 | { |
3091 | 0 | HT_ASSERT_RC1(ht); |
3092 | | |
3093 | | /* Unpack the array early to avoid RCn assertion failures. */ |
3094 | 0 | if (HT_IS_PACKED(ht)) { |
3095 | 0 | zend_hash_packed_to_hash(ht); |
3096 | 0 | } |
3097 | | |
3098 | | /* Adding a refcount prevents the array from going away. */ |
3099 | 0 | GC_ADDREF(ht); |
3100 | |
3101 | 0 | zend_hash_sort_internal(ht, sort, compar, renumber); |
3102 | |
3103 | 0 | if (UNEXPECTED(GC_DELREF(ht) == 0)) { |
3104 | 0 | zend_array_destroy(ht); |
3105 | 0 | } else { |
3106 | 0 | gc_check_possible_root((zend_refcounted *)ht); |
3107 | 0 | } |
3108 | 0 | } |
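/* --- Illustrative sketch (editor's addition, not part of zend_hash.c) ---
 * The GC_ADDREF()/GC_DELREF() pair above is the general "pin across a
 * re-entrant callback" pattern: a userland comparator may release the last
 * external reference to the array while the sort is still running.  A
 * hypothetical helper applying the same pattern to an arbitrary callback
 * could look like this. */
static void call_with_array_pinned(zend_array *arr, void (*cb)(zend_array *))
{
	GC_ADDREF(arr);                  /* keep the array alive across cb() */
	cb(arr);
	if (UNEXPECTED(GC_DELREF(arr) == 0)) {
		zend_array_destroy(arr);     /* cb() dropped every other reference */
	} else {
		gc_check_possible_root((zend_refcounted *)arr);  /* may now be a cycle root */
	}
}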
3109 | | |
3110 | 0 | static zend_always_inline int zend_hash_compare_impl(const HashTable *ht1, const HashTable *ht2, compare_func_t compar, bool ordered) { |
3111 | 0 | uint32_t idx1, idx2; |
3112 | 0 | zend_string *key1, *key2; |
3113 | 0 | zend_ulong h1, h2; |
3114 | 0 | zval *pData1, *pData2; |
3115 | 0 | int result; |
3116 | |
3117 | 0 | if (ht1->nNumOfElements != ht2->nNumOfElements) { |
3118 | 0 | return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1; |
3119 | 0 | } |
3120 | | |
3121 | 0 | for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) { |
3122 | 0 | if (HT_IS_PACKED(ht1)) { |
3123 | 0 | pData1 = ht1->arPacked + idx1; |
3124 | 0 | h1 = idx1; |
3125 | 0 | key1 = NULL; |
3126 | 0 | } else { |
3127 | 0 | Bucket *p = ht1->arData + idx1; |
3128 | 0 | pData1 = &p->val; |
3129 | 0 | h1 = p->h; |
3130 | 0 | key1 = p->key; |
3131 | 0 | } |
3132 | |
3133 | 0 | if (Z_TYPE_P(pData1) == IS_UNDEF) continue; |
3134 | 0 | if (ordered) { |
3135 | 0 | if (HT_IS_PACKED(ht2)) { |
3136 | 0 | while (1) { |
3137 | 0 | ZEND_ASSERT(idx2 != ht2->nNumUsed); |
3138 | 0 | pData2 = ht2->arPacked + idx2; |
3139 | 0 | h2 = idx2; |
3140 | 0 | key2 = NULL; |
3141 | 0 | if (Z_TYPE_P(pData2) != IS_UNDEF) break; |
3142 | 0 | idx2++; |
3143 | 0 | } |
3144 | 0 | } else { |
3145 | 0 | while (1) { |
3146 | 0 | Bucket *p; |
3147 | 0 | ZEND_ASSERT(idx2 != ht2->nNumUsed); |
3148 | 0 | p = ht2->arData + idx2; |
3149 | 0 | pData2 = &p->val; |
3150 | 0 | h2 = p->h; |
3151 | 0 | key2 = p->key; |
3152 | 0 | if (Z_TYPE_P(pData2) != IS_UNDEF) break; |
3153 | 0 | idx2++; |
3154 | 0 | } |
3155 | 0 | } |
3156 | 0 | if (key1 == NULL && key2 == NULL) { /* numeric indices */ |
3157 | 0 | if (h1 != h2) { |
3158 | 0 | return h1 > h2 ? 1 : -1; |
3159 | 0 | } |
3160 | 0 | } else if (key1 != NULL && key2 != NULL) { /* string indices */ |
3161 | 0 | if (ZSTR_LEN(key1) != ZSTR_LEN(key2)) { |
3162 | 0 | return ZSTR_LEN(key1) > ZSTR_LEN(key2) ? 1 : -1; |
3163 | 0 | } |
3164 | | |
3165 | 0 | result = memcmp(ZSTR_VAL(key1), ZSTR_VAL(key2), ZSTR_LEN(key1)); |
3166 | 0 | if (result != 0) { |
3167 | 0 | return result; |
3168 | 0 | } |
3169 | 0 | } else { |
3170 | | /* Mixed key types: a string key is considered larger */ |
3171 | 0 | return key1 != NULL ? 1 : -1; |
3172 | 0 | } |
3173 | 0 | idx2++; |
3174 | 0 | } else { |
3175 | 0 | if (key1 == NULL) { /* numeric index */ |
3176 | 0 | pData2 = zend_hash_index_find(ht2, h1); |
3177 | 0 | if (pData2 == NULL) { |
3178 | 0 | return 1; |
3179 | 0 | } |
3180 | 0 | } else { /* string index */ |
3181 | 0 | pData2 = zend_hash_find(ht2, key1); |
3182 | 0 | if (pData2 == NULL) { |
3183 | 0 | return 1; |
3184 | 0 | } |
3185 | 0 | } |
3186 | 0 | } |
3187 | | |
3188 | 0 | if (Z_TYPE_P(pData1) == IS_INDIRECT) { |
3189 | 0 | pData1 = Z_INDIRECT_P(pData1); |
3190 | 0 | } |
3191 | 0 | if (Z_TYPE_P(pData2) == IS_INDIRECT) { |
3192 | 0 | pData2 = Z_INDIRECT_P(pData2); |
3193 | 0 | } |
3194 | |
3195 | 0 | if (Z_TYPE_P(pData1) == IS_UNDEF) { |
3196 | 0 | if (Z_TYPE_P(pData2) != IS_UNDEF) { |
3197 | 0 | return -1; |
3198 | 0 | } |
3199 | 0 | } else if (Z_TYPE_P(pData2) == IS_UNDEF) { |
3200 | 0 | return 1; |
3201 | 0 | } else { |
3202 | 0 | result = compar(pData1, pData2); |
3203 | 0 | if (result != 0) { |
3204 | 0 | return result; |
3205 | 0 | } |
3206 | 0 | } |
3207 | 0 | } |
3208 | | |
3209 | 0 | return 0; |
3210 | 0 | } |
3211 | | |
3212 | | ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, bool ordered) |
3213 | 0 | { |
3214 | 0 | int result; |
3215 | 0 | IS_CONSISTENT(ht1); |
3216 | 0 | IS_CONSISTENT(ht2); |
3217 | |
3218 | 0 | if (ht1 == ht2) { |
3219 | 0 | return 0; |
3220 | 0 | } |
3221 | | |
3222 | | /* It's enough to protect only one of the arrays. |
3223 | | * The second one may be referenced from the first and this may cause |
3224 | | * false recursion detection. |
3225 | | */ |
3226 | 0 | if (UNEXPECTED(GC_IS_RECURSIVE(ht1))) { |
3227 | 0 | zend_throw_error(NULL, "Nesting level too deep - recursive dependency?"); |
3228 | 0 | return ZEND_UNCOMPARABLE; |
3229 | 0 | } |
3230 | | |
3231 | 0 | GC_TRY_PROTECT_RECURSION(ht1); |
3232 | 0 | result = zend_hash_compare_impl(ht1, ht2, compar, ordered); |
3233 | 0 | GC_TRY_UNPROTECT_RECURSION(ht1); |
3234 | |
3235 | 0 | return result; |
3236 | 0 | } |
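/* --- Illustrative sketch (editor's addition, not part of zend_hash.c) ---
 * Semantics of the `ordered` flag: with ordered=true the key sequences of the
 * two tables must match position by position; with ordered=false each key of
 * ht1 is merely looked up in ht2.  compare_zval_longs() is a hypothetical
 * compare_func_t used only for these sketches (real callers pass a zval
 * comparison routine). */
static int compare_zval_longs(const void *a, const void *b)
{
	zend_long la = Z_LVAL_P((const zval *) a);
	zend_long lb = Z_LVAL_P((const zval *) b);
	return (la > lb) - (la < lb);
}

static bool long_arrays_equal(HashTable *a, HashTable *b, bool same_order)
{
	/* 0 means equal; ZEND_UNCOMPARABLE is returned on recursion errors */
	return zend_hash_compare(a, b, compare_zval_longs, same_order) == 0;
}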
3237 | | |
3238 | | |
3239 | | ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, compare_func_t compar, uint32_t flag) |
3240 | 0 | { |
3241 | 0 | uint32_t idx; |
3242 | 0 | zval *res; |
3243 | |
3244 | 0 | IS_CONSISTENT(ht); |
3245 | |
3246 | 0 | if (ht->nNumOfElements == 0) { |
3247 | 0 | return NULL; |
3248 | 0 | } |
3249 | | |
3250 | 0 | if (HT_IS_PACKED(ht)) { |
3251 | 0 | zval *zv; |
3252 | |
3253 | 0 | idx = 0; |
3254 | 0 | while (1) { |
3255 | 0 | if (idx == ht->nNumUsed) { |
3256 | 0 | return NULL; |
3257 | 0 | } |
3258 | 0 | if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) break; |
3259 | 0 | idx++; |
3260 | 0 | } |
3261 | 0 | res = ht->arPacked + idx; |
3262 | 0 | for (; idx < ht->nNumUsed; idx++) { |
3263 | 0 | zv = ht->arPacked + idx; |
3264 | 0 | if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue; |
3265 | | |
3266 | 0 | if (flag) { |
3267 | 0 | if (compar(res, zv) < 0) { /* max */ |
3268 | 0 | res = zv; |
3269 | 0 | } |
3270 | 0 | } else { |
3271 | 0 | if (compar(res, zv) > 0) { /* min */ |
3272 | 0 | res = zv; |
3273 | 0 | } |
3274 | 0 | } |
3275 | 0 | } |
3276 | 0 | } else { |
3277 | 0 | Bucket *p; |
3278 | |
3279 | 0 | idx = 0; |
3280 | 0 | while (1) { |
3281 | 0 | if (idx == ht->nNumUsed) { |
3282 | 0 | return NULL; |
3283 | 0 | } |
3284 | 0 | if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break; |
3285 | 0 | idx++; |
3286 | 0 | } |
3287 | 0 | res = &ht->arData[idx].val; |
3288 | 0 | for (; idx < ht->nNumUsed; idx++) { |
3289 | 0 | p = ht->arData + idx; |
3290 | 0 | if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; |
3291 | | |
3292 | 0 | if (flag) { |
3293 | 0 | if (compar(res, &p->val) < 0) { /* max */ |
3294 | 0 | res = &p->val; |
3295 | 0 | } |
3296 | 0 | } else { |
3297 | 0 | if (compar(res, &p->val) > 0) { /* min */ |
3298 | 0 | res = &p->val; |
3299 | 0 | } |
3300 | 0 | } |
3301 | 0 | } |
3302 | 0 | } |
3303 | 0 | return res; |
3304 | 0 | } |
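/* --- Illustrative sketch (editor's addition, not part of zend_hash.c) ---
 * zend_hash_minmax() scans the live slots and keeps the extreme element:
 * flag != 0 selects the maximum, flag == 0 the minimum, and NULL is returned
 * for an empty table.  Reuses the hypothetical compare_zval_longs()
 * comparator from the previous sketch. */
static zend_long max_long_or(const HashTable *ht, zend_long fallback)
{
	zval *max = zend_hash_minmax(ht, compare_zval_longs, /* flag: max */ 1);
	return max ? Z_LVAL_P(max) : fallback;
}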
3305 | | |
3306 | | ZEND_API bool ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx) |
3307 | 12.7k | { |
3308 | 12.7k | const char *tmp = key; |
3309 | | |
3310 | 12.7k | const char *end = key + length; |
3311 | | |
3312 | 12.7k | if (*tmp == '-') { |
3313 | 133 | tmp++; |
3314 | 133 | } |
3315 | | |
3316 | 12.7k | if ((*tmp == '0' && length > 1) /* numbers with leading zeros */ |
3317 | 12.6k | || (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */ |
3318 | 0 | || (SIZEOF_ZEND_LONG == 4 && |
3319 | 0 | end - tmp == MAX_LENGTH_OF_LONG - 1 && |
3320 | 143 | *tmp > '2')) { /* overflow */ |
3321 | 143 | return 0; |
3322 | 143 | } |
3323 | 12.6k | *idx = (*tmp - '0'); |
3324 | 24.5k | while (1) { |
3325 | 24.5k | ++tmp; |
3326 | 24.5k | if (tmp == end) { |
3327 | 2.47k | if (*key == '-') { |
3328 | 130 | if (*idx-1 > ZEND_LONG_MAX) { /* overflow */ |
3329 | 42 | return 0; |
3330 | 42 | } |
3331 | 88 | *idx = 0 - *idx; |
3332 | 2.34k | } else if (*idx > ZEND_LONG_MAX) { /* overflow */ |
3333 | 24 | return 0; |
3334 | 24 | } |
3335 | 2.40k | return 1; |
3336 | 2.47k | } |
3337 | 22.0k | if (*tmp <= '9' && *tmp >= '0') { |
3338 | 11.9k | *idx = (*idx * 10) + (*tmp - '0'); |
3339 | 11.9k | } else { |
3340 | 10.1k | return 0; |
3341 | 10.1k | } |
3342 | 22.0k | } |
3343 | 12.6k | } |
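/* --- Illustrative sketch (editor's addition, not part of zend_hash.c) ---
 * What _zend_handle_numeric_str_ex() accepts: only a canonical decimal integer
 * that fits in zend_long becomes an integer key.  Leading zeros, "-0", embedded
 * non-digits and out-of-range values are rejected, so "0123" stays a string key
 * while "123" becomes integer key 123.  demo_numeric_keys() is a hypothetical
 * self-check written only for this sketch. */
static void demo_numeric_keys(void)
{
	zend_ulong idx;

	ZEND_ASSERT(_zend_handle_numeric_str_ex("123", 3, &idx) && idx == 123);
	ZEND_ASSERT(_zend_handle_numeric_str_ex("-7", 2, &idx) && (zend_long) idx == -7);
	ZEND_ASSERT(!_zend_handle_numeric_str_ex("0123", 4, &idx)); /* leading zero  */
	ZEND_ASSERT(!_zend_handle_numeric_str_ex("-0", 2, &idx));   /* non-canonical */
	ZEND_ASSERT(!_zend_handle_numeric_str_ex("12a", 3, &idx));  /* trailing char */
}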
3344 | | |
3345 | | /* Takes a "symtable" hashtable (contains integer and non-numeric string keys) |
3346 | | * and converts it to a "proptable" (contains only string keys). |
3347 | | * If the symtable didn't need duplicating, its refcount is incremented. |
3348 | | */ |
3349 | | ZEND_API HashTable* ZEND_FASTCALL zend_symtable_to_proptable(HashTable *ht) |
3350 | 0 | { |
3351 | 0 | zend_ulong num_key; |
3352 | 0 | zend_string *str_key; |
3353 | 0 | zval *zv; |
3354 | |
3355 | 0 | if (UNEXPECTED(HT_IS_PACKED(ht))) { |
3356 | 0 | goto convert; |
3357 | 0 | } |
3358 | | |
3359 | 0 | ZEND_HASH_MAP_FOREACH_STR_KEY(ht, str_key) { |
3360 | 0 | if (!str_key) { |
3361 | 0 | goto convert; |
3362 | 0 | } |
3363 | 0 | } ZEND_HASH_FOREACH_END(); |
3364 | | |
3365 | 0 | if (!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE)) { |
3366 | 0 | GC_ADDREF(ht); |
3367 | 0 | } |
3368 | |
3369 | 0 | return ht; |
3370 | | |
3371 | 0 | convert: |
3372 | 0 | { |
3373 | 0 | HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht)); |
3374 | |
3375 | 0 | ZEND_HASH_FOREACH_KEY_VAL(ht, num_key, str_key, zv) { |
3376 | 0 | if (!str_key) { |
3377 | 0 | str_key = zend_long_to_str(num_key); |
3378 | 0 | zend_string_delref(str_key); |
3379 | 0 | } |
3380 | 0 | do { |
3381 | 0 | if (Z_OPT_REFCOUNTED_P(zv)) { |
3382 | 0 | if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) { |
3383 | 0 | zv = Z_REFVAL_P(zv); |
3384 | 0 | if (!Z_OPT_REFCOUNTED_P(zv)) { |
3385 | 0 | break; |
3386 | 0 | } |
3387 | 0 | } |
3388 | 0 | Z_ADDREF_P(zv); |
3389 | 0 | } |
3390 | 0 | } while (0); |
3391 | 0 | zend_hash_update(new_ht, str_key, zv); |
3392 | 0 | } ZEND_HASH_FOREACH_END(); |
3393 | |
3394 | 0 | return new_ht; |
3395 | 0 | } |
3396 | 0 | } |
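/* --- Illustrative sketch (editor's addition, not part of zend_hash.c) ---
 * Effect of zend_symtable_to_proptable(): integer keys are rewritten as their
 * decimal string form; a table that already has only string keys is returned
 * as-is (with an extra reference unless it is immutable), so the caller can
 * release the result the same way in both cases.  use_as_proptable() is a
 * hypothetical helper written only for this sketch. */
static void use_as_proptable(HashTable *symtable)
{
	HashTable *props = zend_symtable_to_proptable(symtable);

	/* ... read props: an entry stored under integer key 42 in the symtable
	 *     is now found under the string key "42" ... */

	if (!(GC_FLAGS(props) & IS_ARRAY_IMMUTABLE) && GC_DELREF(props) == 0) {
		zend_array_destroy(props);   /* we held the only reference to a copy */
	}
}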
3397 | | |
3398 | | /* Takes a "proptable" hashtable (contains only string keys) and converts it to |
3399 | | * a "symtable" (contains integer and non-numeric string keys). |
3400 | | * If the proptable didn't need duplicating, its refcount is incremented. |
3401 | | */ |
3402 | | ZEND_API HashTable* ZEND_FASTCALL zend_proptable_to_symtable(HashTable *ht, bool always_duplicate) |
3403 | 0 | { |
3404 | 0 | zend_ulong num_key; |
3405 | 0 | zend_string *str_key; |
3406 | 0 | zval *zv; |
3407 | |
3408 | 0 | if (!HT_IS_PACKED(ht)) { |
3409 | 0 | ZEND_HASH_MAP_FOREACH_STR_KEY(ht, str_key) { |
3410 | | /* The `str_key &&` here might seem redundant: property tables should |
3411 | | * only have string keys. Unfortunately, this isn't true, at the very |
3412 | | * least because of ArrayObject, which stores a symtable where the |
3413 | | * property table should be. |
3414 | | */ |
3415 | 0 | if (str_key && ZEND_HANDLE_NUMERIC(str_key, num_key)) { |
3416 | 0 | goto convert; |
3417 | 0 | } |
3418 | 0 | } ZEND_HASH_FOREACH_END(); |
3419 | 0 | } |
3420 | | |
3421 | 0 | if (always_duplicate) { |
3422 | 0 | return zend_array_dup(ht); |
3423 | 0 | } |
3424 | | |
3425 | 0 | if (EXPECTED(!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE))) { |
3426 | 0 | GC_ADDREF(ht); |
3427 | 0 | } |
3428 | |
3429 | 0 | return ht; |
3430 | | |
3431 | 0 | convert: |
3432 | 0 | { |
3433 | 0 | HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht)); |
3434 | |
3435 | 0 | ZEND_HASH_MAP_FOREACH_KEY_VAL_IND(ht, num_key, str_key, zv) { |
3436 | 0 | do { |
3437 | 0 | if (Z_OPT_REFCOUNTED_P(zv)) { |
3438 | 0 | if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) { |
3439 | 0 | zv = Z_REFVAL_P(zv); |
3440 | 0 | if (!Z_OPT_REFCOUNTED_P(zv)) { |
3441 | 0 | break; |
3442 | 0 | } |
3443 | 0 | } |
3444 | 0 | Z_ADDREF_P(zv); |
3445 | 0 | } |
3446 | 0 | } while (0); |
3447 | | /* Again, thank ArrayObject for `!str_key ||`. */ |
3448 | 0 | if (!str_key || ZEND_HANDLE_NUMERIC(str_key, num_key)) { |
3449 | 0 | zend_hash_index_update(new_ht, num_key, zv); |
3450 | 0 | } else { |
3451 | 0 | zend_hash_update(new_ht, str_key, zv); |
3452 | 0 | } |
3453 | 0 | } ZEND_HASH_FOREACH_END(); |
3454 | |
3455 | 0 | return new_ht; |
3456 | 0 | } |
3457 | 0 | } |
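/* --- Illustrative sketch (editor's addition, not part of zend_hash.c) ---
 * Effect of zend_proptable_to_symtable(): string keys that spell a canonical
 * integer (see _zend_handle_numeric_str_ex() above) become integer keys again;
 * everything else keeps its string key.  With always_duplicate=true the caller
 * always owns a fresh array it may modify freely.  props_as_symtable_copy() is
 * a hypothetical helper written only for this sketch. */
static HashTable *props_as_symtable_copy(HashTable *props)
{
	/* The returned table must be released by the caller, e.g. with
	 * zend_array_destroy(), once it is no longer needed. */
	return zend_proptable_to_symtable(props, /* always_duplicate */ true);
}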