| author | Dmitry Stogov <dmitry@zend.com> | 2015-03-13 17:13:19 +0300 |
|---|---|---|
| committer | Dmitry Stogov <dmitry@zend.com> | 2015-03-13 17:13:19 +0300 |
| commit | 2b42d719084631d255ec7ebb6c2928b9339915c2 (patch) | |
| tree | 33321998e169cfa41435609895c0d6b379dcbdff | |
| parent | 0a4a11b73ae32b31810451d1f7e8719ca0a503db (diff) | |
| download | php-git-2b42d719084631d255ec7ebb6c2928b9339915c2.tar.gz | |
Changed HashTable layout:
Removed HashTable->arHash (reduced memory consumption). Hash slots may now be accessed using the HT_HASH() macro.
Hash slots are allocated together with the Buckets (in front of them) and lie in reverse order from the HashTable->arData base address (see the comments in Zend/zend_types.h).
Indexes in the hash table and in the conflict-resolution chains (Z_NEXT) may be stored either as indices or as byte offsets, depending on the system (32- or 64-bit).
HashTable data fields are reordered to keep the data most useful to zend_hash_find() in the same CPU cache line.
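
To make the new layout concrete, below is a minimal, self-contained C sketch (simplified names and a toy bucket type; this is not the actual Zend code or macro set). A single allocation holds the uint32_t hash slots followed by the Bucket array; arData points at the buckets, and lookups index the slots backwards using nIndex = h | nTableMask, where nTableMask is assumed to equal (uint32_t)-nTableSize as in this commit.

```c
/* Minimal sketch of the combined hash-slot/bucket allocation, with
 * simplified names and a toy bucket type (NOT the real Zend code).
 * Hash slots sit in front of arData and are reached with a negative
 * index; with nTableMask == (uint32_t)-nTableSize, the expression
 * h | nTableMask falls into [-nTableSize, -1] when read as signed. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define T_INVALID_IDX ((uint32_t)-1)

typedef struct {
    uint32_t h;     /* hash of the key */
    uint32_t key;   /* toy key, stands in for zend_string* */
    uint32_t next;  /* collision chain link, like Z_NEXT(p->val) */
    int      val;
} bucket_t;

typedef struct {
    uint32_t  nTableSize;  /* power of two */
    uint32_t  nTableMask;  /* (uint32_t)-nTableSize */
    uint32_t  nNumUsed;
    bucket_t *arData;      /* points AFTER the hash slots */
} table_t;

/* counterpart of HT_HASH(): index the uint32_t slots backwards from arData */
#define T_HASH(t, idx) (((uint32_t*)((t)->arData))[(int32_t)(idx)])

static void table_init(table_t *t, uint32_t size)
{
    size_t hash_sz = size * sizeof(uint32_t);
    char *mem = malloc(hash_sz + size * sizeof(bucket_t)); /* one block */
    t->nTableSize = size;
    t->nTableMask = (uint32_t)0 - size;
    t->nNumUsed   = 0;
    t->arData     = (bucket_t*)(mem + hash_sz);  /* like HT_SET_DATA_ADDR */
    memset(mem, 0xFF, hash_sz);                  /* like HT_HASH_RESET */
}

static void table_insert(table_t *t, uint32_t key, uint32_t h, int val)
{
    uint32_t idx    = t->nNumUsed++;
    uint32_t nIndex = h | t->nTableMask;         /* was: h & (size - 1) */
    bucket_t *p     = t->arData + idx;
    p->h = h; p->key = key; p->val = val;
    p->next = T_HASH(t, nIndex);                 /* prepend to the chain */
    T_HASH(t, nIndex) = idx;                     /* 64-bit flavour: plain index */
}

static bucket_t *table_find(const table_t *t, uint32_t key, uint32_t h)
{
    uint32_t idx = T_HASH(t, h | t->nTableMask);
    while (idx != T_INVALID_IDX) {
        bucket_t *p = t->arData + idx;
        if (p->h == h && p->key == key) {
            return p;
        }
        idx = p->next;
    }
    return NULL;
}

int main(void)
{
    table_t t;
    table_init(&t, 8);
    table_insert(&t, 42, 42u * 2654435761u, 123);
    bucket_t *found = table_find(&t, 42, 42u * 2654435761u);
    printf("%d\n", found ? found->val : -1);     /* prints 123 */
    free((char*)t.arData - t.nTableSize * sizeof(uint32_t));
    return 0;
}
```

Keeping the slots immediately in front of the buckets means the whole table is one block: the real code recovers the allocation start with HT_GET_DATA_ADDR() and sizes it with HT_SIZE(), so grow, convert and free operations deal with a single pointer.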
| -rw-r--r-- | Zend/zend_hash.c | 319 |
| -rw-r--r-- | Zend/zend_hash.h | 20 |
| -rw-r--r-- | Zend/zend_string.c | 62 |
| -rw-r--r-- | Zend/zend_types.h | 59 |
| -rw-r--r-- | Zend/zend_vm_def.h | 6 |
| -rw-r--r-- | Zend/zend_vm_execute.h | 6 |
| -rw-r--r-- | ext/opcache/ZendAccelerator.c | 65 |
| -rw-r--r-- | ext/opcache/zend_accelerator_util_funcs.c | 61 |
| -rw-r--r-- | ext/opcache/zend_persist.c | 43 |
| -rw-r--r-- | ext/opcache/zend_persist_calc.c | 7 |
| -rw-r--r-- | ext/spl/spl_array.c | 18 |
| -rw-r--r-- | ext/spl/spl_observer.c | 2 |
| -rw-r--r-- | ext/standard/array.c | 2 |
13 files changed, 363 insertions, 307 deletions
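
Before the diff itself, one more small self-contained sketch (simplified names; not the real HT_IDX_TO_HASH()/HT_HASH_TO_BUCKET_EX() macros) of the two chain-link encodings mentioned in the commit message: 64-bit builds store plain bucket indexes, 32-bit builds store byte offsets from arData, and both resolve to the same bucket.

```c
/* Sketch of the two encodings for values kept in the hash slots and in
 * the collision chains (see the HT_IDX_TO_HASH / HT_HASH_TO_BUCKET_EX
 * hunk in zend_types.h below); names here are simplified. */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t h; uint32_t next; int val; } bucket_t;

/* 64-bit flavour: the stored value is the bucket index itself. */
static bucket_t *by_index(bucket_t *arData, uint32_t stored)
{
    return arData + stored;
}

/* 32-bit flavour: the stored value is a byte offset from arData,
 * so no index-to-offset multiplication is needed on lookup. */
static bucket_t *by_offset(bucket_t *arData, uint32_t stored)
{
    return (bucket_t *)((char *)arData + stored);
}

int main(void)
{
    bucket_t buckets[4] = { {0, 0, 10}, {0, 0, 20}, {0, 0, 30}, {0, 0, 40} };
    uint32_t idx = 2;
    printf("%d %d\n",
           by_index(buckets, idx)->val,                                  /* 30 */
           by_offset(buckets, (uint32_t)(idx * sizeof(bucket_t)))->val); /* 30 */
    return 0;
}
```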
diff --git a/Zend/zend_hash.c b/Zend/zend_hash.c index 8a9a032e16..681b8d0ec2 100644 --- a/Zend/zend_hash.c +++ b/Zend/zend_hash.c @@ -126,13 +126,13 @@ static void zend_always_inline zend_hash_check_init(HashTable *ht, int packed) if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) { if (packed) { (ht)->u.flags |= HASH_FLAG_INITIALIZED | HASH_FLAG_PACKED; - (ht)->arData = (Bucket *) pemalloc((ht)->nTableSize * sizeof(Bucket), (ht)->u.flags & HASH_FLAG_PERSISTENT); + HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT)); + HT_HASH_RESET_PACKED(ht); } else { (ht)->u.flags |= HASH_FLAG_INITIALIZED; - (ht)->nTableMask = (ht)->nTableSize - 1; - (ht)->arData = (Bucket *) pemalloc((ht)->nTableSize * (sizeof(Bucket) + sizeof(uint32_t)), (ht)->u.flags & HASH_FLAG_PERSISTENT); - (ht)->arHash = (uint32_t*)((ht)->arData + (ht)->nTableSize); - memset((ht)->arHash, INVALID_IDX, (ht)->nTableSize * sizeof(uint32_t)); + (ht)->nTableMask = -(ht)->nTableSize; + HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT)); + HT_HASH_RESET(ht); } } } @@ -140,36 +140,22 @@ static void zend_always_inline zend_hash_check_init(HashTable *ht, int packed) #define CHECK_INIT(ht, packed) \ zend_hash_check_init(ht, packed) -static const uint32_t uninitialized_bucket = {INVALID_IDX}; +static const uint32_t uninitialized_bucket[-HT_MIN_MASK] = + {HT_INVALID_IDX, HT_INVALID_IDX}; ZEND_API void _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent ZEND_FILE_LINE_DC) { GC_REFCOUNT(ht) = 1; GC_TYPE_INFO(ht) = IS_ARRAY; + ht->u.flags = (persistent ? HASH_FLAG_PERSISTENT : 0) | HASH_FLAG_APPLY_PROTECTION; ht->nTableSize = zend_hash_check_size(nSize); - ht->nTableMask = 0; + ht->nTableMask = HT_MIN_MASK; + HT_SET_DATA_ADDR(ht, &uninitialized_bucket); ht->nNumUsed = 0; ht->nNumOfElements = 0; + ht->nInternalPointer = HT_INVALID_IDX; ht->nNextFreeElement = 0; - ht->arData = NULL; - ht->arHash = (uint32_t*)&uninitialized_bucket; ht->pDestructor = pDestructor; - ht->nInternalPointer = INVALID_IDX; - ht->u.flags = (persistent ? 
HASH_FLAG_PERSISTENT : 0) | HASH_FLAG_APPLY_PROTECTION; -} - -static zend_always_inline void zend_hash_realloc(HashTable *ht, size_t new_size) -{ -#if 1 - if (!(ht->u.flags & HASH_FLAG_PERSISTENT) && new_size <= ZEND_MM_MAX_SMALL_SIZE) { - Bucket *newData = emalloc(new_size); - memcpy(newData, ht->arData, ht->nNumUsed * sizeof(Bucket)); - efree(ht->arData); - ht->arData = newData; - return; - } -#endif - ht->arData = (Bucket *) perealloc2(ht->arData, new_size, ht->nNumUsed * sizeof(Bucket), ht->u.flags & HASH_FLAG_PERSISTENT); } static void zend_hash_packed_grow(HashTable *ht) @@ -180,7 +166,7 @@ static void zend_hash_packed_grow(HashTable *ht) } HANDLE_BLOCK_INTERRUPTIONS(); ht->nTableSize += ht->nTableSize; - zend_hash_realloc(ht, ht->nTableSize * sizeof(Bucket)); + HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT)); HANDLE_UNBLOCK_INTERRUPTIONS(); } @@ -194,24 +180,33 @@ ZEND_API void zend_hash_real_init(HashTable *ht, zend_bool packed) ZEND_API void zend_hash_packed_to_hash(HashTable *ht) { + void *old_data = HT_GET_DATA_ADDR(ht); + Bucket *old_buckets = ht->arData; + HT_ASSERT(GC_REFCOUNT(ht) == 1); HANDLE_BLOCK_INTERRUPTIONS(); ht->u.flags &= ~HASH_FLAG_PACKED; - ht->nTableMask = ht->nTableSize - 1; - zend_hash_realloc(ht, ht->nTableSize * (sizeof(Bucket) + sizeof(uint32_t))); - ht->arHash = (uint32_t*)(ht->arData + ht->nTableSize); + ht->nTableMask = -ht->nTableSize; + HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT)); + memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed); + pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT); zend_hash_rehash(ht); HANDLE_UNBLOCK_INTERRUPTIONS(); } ZEND_API void zend_hash_to_packed(HashTable *ht) { + void *old_data = HT_GET_DATA_ADDR(ht); + Bucket *old_buckets = ht->arData; + HT_ASSERT(GC_REFCOUNT(ht) == 1); HANDLE_BLOCK_INTERRUPTIONS(); ht->u.flags |= HASH_FLAG_PACKED; - ht->nTableMask = 0; - zend_hash_realloc(ht, ht->nTableSize * sizeof(Bucket)); - ht->arHash = (uint32_t*)&uninitialized_bucket; + ht->nTableMask = HT_MIN_MASK; + HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT)); + HT_HASH_RESET_PACKED(ht); + memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed); + pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT); HANDLE_UNBLOCK_INTERRUPTIONS(); } @@ -238,17 +233,21 @@ ZEND_API void zend_hash_extend(HashTable *ht, uint32_t nSize, zend_bool packed) if (nSize > ht->nTableSize) { HANDLE_BLOCK_INTERRUPTIONS(); ht->nTableSize = zend_hash_check_size(nSize); - zend_hash_realloc(ht, ht->nTableSize * sizeof(Bucket)); + HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT)); HANDLE_UNBLOCK_INTERRUPTIONS(); } } else { ZEND_ASSERT(!(ht->u.flags & HASH_FLAG_PACKED)); if (nSize > ht->nTableSize) { + void *old_data = HT_GET_DATA_ADDR(ht); + Bucket *old_buckets = ht->arData; + HANDLE_BLOCK_INTERRUPTIONS(); ht->nTableSize = zend_hash_check_size(nSize); - zend_hash_realloc(ht, ht->nTableSize * (sizeof(Bucket) + sizeof(uint32_t))); - ht->arHash = (uint32_t*)(ht->arData + ht->nTableSize); - ht->nTableMask = ht->nTableSize - 1; + ht->nTableMask = -ht->nTableSize; + HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT)); + memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed); + pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT); zend_hash_rehash(ht); HANDLE_UNBLOCK_INTERRUPTIONS(); } @@ -307,8 +306,8 @@ 
ZEND_API HashPosition zend_hash_iterator_pos(uint32_t idx, HashTable *ht) HashTableIterator *iter = EG(ht_iterators) + idx; ZEND_ASSERT(idx != (uint32_t)-1); - if (iter->pos == INVALID_IDX) { - return INVALID_IDX; + if (iter->pos == HT_INVALID_IDX) { + return HT_INVALID_IDX; } else if (UNEXPECTED(iter->ht != ht)) { if (EXPECTED(iter->ht) && EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) { iter->ht->u.v.nIteratorsCount--; @@ -372,7 +371,7 @@ ZEND_API HashPosition zend_hash_iterators_lower_pos(HashTable *ht, HashPosition { HashTableIterator *iter = EG(ht_iterators); HashTableIterator *end = iter + EG(ht_iterators_used); - HashPosition res = INVALID_IDX; + HashPosition res = HT_INVALID_IDX; while (iter != end) { if (iter->ht == ht) { @@ -403,18 +402,19 @@ static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, zen zend_ulong h; uint32_t nIndex; uint32_t idx; - Bucket *p; + Bucket *p, *arData; h = zend_string_hash_val(key); - nIndex = h & ht->nTableMask; - idx = ht->arHash[nIndex]; - while (idx != INVALID_IDX) { - p = ht->arData + idx; - if ((p->key == key) || /* check for the the same interned string */ - (p->h == h && - p->key && - p->key->len == key->len && - memcmp(p->key->val, key->val, key->len) == 0)) { + arData = ht->arData; + nIndex = h | ht->nTableMask; + idx = HT_HASH_EX(arData, nIndex); + while (idx != HT_INVALID_IDX) { + p = HT_HASH_TO_BUCKET_EX(arData, idx); + if (p->key == key || /* check for the the same interned string */ + (p->h == h && + p->key && + p->key->len == key->len && + memcmp(p->key->val, key->val, key->len) == 0)) { return p; } idx = Z_NEXT(p->val); @@ -426,13 +426,14 @@ static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, { uint32_t nIndex; uint32_t idx; - Bucket *p; - - nIndex = h & ht->nTableMask; - idx = ht->arHash[nIndex]; - while (idx != INVALID_IDX) { - ZEND_ASSERT(idx < ht->nTableSize); - p = ht->arData + idx; + Bucket *p, *arData; + + arData = ht->arData; + nIndex = h | ht->nTableMask; + idx = HT_HASH_EX(arData, nIndex); + while (idx != HT_INVALID_IDX) { + ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize)); + p = HT_HASH_TO_BUCKET_EX(arData, idx); if ((p->h == h) && p->key && (p->key->len == len) @@ -448,13 +449,14 @@ static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *h { uint32_t nIndex; uint32_t idx; - Bucket *p; - - nIndex = h & ht->nTableMask; - idx = ht->arHash[nIndex]; - while (idx != INVALID_IDX) { - ZEND_ASSERT(idx < ht->nTableSize); - p = ht->arData + idx; + Bucket *p, *arData; + + arData = ht->arData; + nIndex = h | ht->nTableMask; + idx = HT_HASH_EX(arData, nIndex); + while (idx != HT_INVALID_IDX) { + ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize)); + p = HT_HASH_TO_BUCKET_EX(arData, idx); if (p->h == h && !p->key) { return p; } @@ -508,18 +510,18 @@ add_to_hash: HANDLE_BLOCK_INTERRUPTIONS(); idx = ht->nNumUsed++; ht->nNumOfElements++; - if (ht->nInternalPointer == INVALID_IDX) { + if (ht->nInternalPointer == HT_INVALID_IDX) { ht->nInternalPointer = idx; } - zend_hash_iterators_update(ht, INVALID_IDX, idx); + zend_hash_iterators_update(ht, HT_INVALID_IDX, idx); p = ht->arData + idx; p->h = h = zend_string_hash_val(key); p->key = key; zend_string_addref(key); ZVAL_COPY_VALUE(&p->val, pData); - nIndex = h & ht->nTableMask; - Z_NEXT(p->val) = ht->arHash[nIndex]; - ht->arHash[nIndex] = idx; + nIndex = h | ht->nTableMask; + Z_NEXT(p->val) = HT_HASH(ht, nIndex); + HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx); HANDLE_UNBLOCK_INTERRUPTIONS(); return &p->val; @@ -677,10 +679,10 
@@ add_to_packed: ht->nNumUsed = h + 1; } ht->nNumOfElements++; - if (ht->nInternalPointer == INVALID_IDX) { + if (ht->nInternalPointer == HT_INVALID_IDX) { ht->nInternalPointer = h; } - zend_hash_iterators_update(ht, INVALID_IDX, h); + zend_hash_iterators_update(ht, HT_INVALID_IDX, h); if ((zend_long)h >= (zend_long)ht->nNextFreeElement) { ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX; } @@ -720,20 +722,20 @@ add_to_hash: HANDLE_BLOCK_INTERRUPTIONS(); idx = ht->nNumUsed++; ht->nNumOfElements++; - if (ht->nInternalPointer == INVALID_IDX) { + if (ht->nInternalPointer == HT_INVALID_IDX) { ht->nInternalPointer = idx; } - zend_hash_iterators_update(ht, INVALID_IDX, idx); + zend_hash_iterators_update(ht, HT_INVALID_IDX, idx); if ((zend_long)h >= (zend_long)ht->nNextFreeElement) { ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX; } p = ht->arData + idx; p->h = h; p->key = NULL; - nIndex = h & ht->nTableMask; + nIndex = h | ht->nTableMask; ZVAL_COPY_VALUE(&p->val, pData); - Z_NEXT(p->val) = ht->arHash[nIndex]; - ht->arHash[nIndex] = idx; + Z_NEXT(p->val) = HT_HASH(ht, nIndex); + HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx); HANDLE_UNBLOCK_INTERRUPTIONS(); return &p->val; @@ -780,11 +782,15 @@ static void zend_hash_do_resize(HashTable *ht) zend_hash_rehash(ht); HANDLE_UNBLOCK_INTERRUPTIONS(); } else if (ht->nTableSize < HT_MAX_SIZE) { /* Let's double the table size */ + void *old_data = HT_GET_DATA_ADDR(ht); + Bucket *old_buckets = ht->arData; + HANDLE_BLOCK_INTERRUPTIONS(); ht->nTableSize += ht->nTableSize; - zend_hash_realloc(ht, ht->nTableSize * (sizeof(Bucket) + sizeof(uint32_t))); - ht->arHash = (uint32_t*)(ht->arData + ht->nTableSize); - ht->nTableMask = ht->nTableSize - 1; + ht->nTableMask = -ht->nTableSize; + HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT)); + memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed); + pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT); zend_hash_rehash(ht); HANDLE_UNBLOCK_INTERRUPTIONS(); } else { @@ -801,12 +807,12 @@ ZEND_API int zend_hash_rehash(HashTable *ht) if (UNEXPECTED(ht->nNumOfElements == 0)) { if (ht->u.flags & HASH_FLAG_INITIALIZED) { - memset(ht->arHash, INVALID_IDX, ht->nTableSize * sizeof(uint32_t)); + HT_HASH_RESET(ht); } return SUCCESS; } - memset(ht->arHash, INVALID_IDX, ht->nTableSize * sizeof(uint32_t)); + HT_HASH_RESET(ht); if (EXPECTED(ht->u.v.nIteratorsCount == 0)) { for (i = 0, j = 0; i < ht->nNumUsed; i++) { p = ht->arData + i; @@ -817,9 +823,9 @@ ZEND_API int zend_hash_rehash(HashTable *ht) ht->nInternalPointer = j; } } - nIndex = ht->arData[j].h & ht->nTableMask; - Z_NEXT(ht->arData[j].val) = ht->arHash[nIndex]; - ht->arHash[nIndex] = j; + nIndex = ht->arData[j].h | ht->nTableMask; + Z_NEXT(ht->arData[j].val) = HT_HASH(ht, nIndex); + HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j); j++; } } else { @@ -838,9 +844,9 @@ ZEND_API int zend_hash_rehash(HashTable *ht) iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1); } } - nIndex = ht->arData[j].h & ht->nTableMask; - Z_NEXT(ht->arData[j].val) = ht->arHash[nIndex]; - ht->arHash[nIndex] = j; + nIndex = ht->arData[j].h | ht->nTableMask; + Z_NEXT(ht->arData[j].val) = HT_HASH(ht, nIndex); + HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j); j++; } } @@ -855,7 +861,7 @@ static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, if (prev) { Z_NEXT(prev->val) = Z_NEXT(p->val); } else { - ht->arHash[p->h & ht->nTableMask] = Z_NEXT(p->val); + HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val); } } if 
(ht->nNumUsed - 1 == idx) { @@ -870,7 +876,7 @@ static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, while (1) { new_idx++; if (new_idx >= ht->nNumUsed) { - new_idx = INVALID_IDX; + new_idx = HT_INVALID_IDX; break; } else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) { break; @@ -900,14 +906,14 @@ static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bu Bucket *prev = NULL; if (!(ht->u.flags & HASH_FLAG_PACKED)) { - uint32_t nIndex = p->h & ht->nTableMask; - uint32_t i = ht->arHash[nIndex]; + uint32_t nIndex = p->h | ht->nTableMask; + uint32_t i = HT_HASH(ht, nIndex); if (i != idx) { - prev = ht->arData + i; + prev = HT_HASH_TO_BUCKET(ht, i); while (Z_NEXT(prev->val) != idx) { i = Z_NEXT(prev->val); - prev = ht->arData + i; + prev = HT_HASH_TO_BUCKET(ht, i); } } } @@ -927,11 +933,11 @@ ZEND_API int zend_hash_del(HashTable *ht, zend_string *key) HT_ASSERT(GC_REFCOUNT(ht) == 1); h = zend_string_hash_val(key); - nIndex = h & ht->nTableMask; + nIndex = h | ht->nTableMask; - idx = ht->arHash[nIndex]; - while (idx != INVALID_IDX) { - p = ht->arData + idx; + idx = HT_HASH(ht, nIndex); + while (idx != HT_INVALID_IDX) { + p = HT_HASH_TO_BUCKET(ht, idx); if ((p->key == key) || (p->h == h && p->key && @@ -958,11 +964,11 @@ ZEND_API int zend_hash_del_ind(HashTable *ht, zend_string *key) HT_ASSERT(GC_REFCOUNT(ht) == 1); h = zend_string_hash_val(key); - nIndex = h & ht->nTableMask; + nIndex = h | ht->nTableMask; - idx = ht->arHash[nIndex]; - while (idx != INVALID_IDX) { - p = ht->arData + idx; + idx = HT_HASH(ht, nIndex); + while (idx != HT_INVALID_IDX) { + p = HT_HASH_TO_BUCKET(ht, idx); if ((p->key == key) || (p->h == h && p->key && @@ -1002,11 +1008,11 @@ ZEND_API int zend_hash_str_del(HashTable *ht, const char *str, size_t len) HT_ASSERT(GC_REFCOUNT(ht) == 1); h = zend_inline_hash_func(str, len); - nIndex = h & ht->nTableMask; + nIndex = h | ht->nTableMask; - idx = ht->arHash[nIndex]; - while (idx != INVALID_IDX) { - p = ht->arData + idx; + idx = HT_HASH(ht, nIndex); + while (idx != HT_INVALID_IDX) { + p = HT_HASH_TO_BUCKET(ht, idx); if ((p->h == h) && p->key && (p->key->len == len) @@ -1045,11 +1051,11 @@ ZEND_API int zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len) HT_ASSERT(GC_REFCOUNT(ht) == 1); h = zend_inline_hash_func(str, len); - nIndex = h & ht->nTableMask; + nIndex = h | ht->nTableMask; - idx = ht->arHash[nIndex]; - while (idx != INVALID_IDX) { - p = ht->arData + idx; + idx = HT_HASH(ht, nIndex); + while (idx != HT_INVALID_IDX) { + p = HT_HASH_TO_BUCKET(ht, idx); if ((p->h == h) && p->key && (p->key->len == len) @@ -1083,11 +1089,11 @@ ZEND_API int zend_hash_index_del(HashTable *ht, zend_ulong h) } return FAILURE; } - nIndex = h & ht->nTableMask; + nIndex = h | ht->nTableMask; - idx = ht->arHash[nIndex]; - while (idx != INVALID_IDX) { - p = ht->arData + idx; + idx = HT_HASH(ht, nIndex); + while (idx != HT_INVALID_IDX) { + p = HT_HASH_TO_BUCKET(ht, idx); if ((p->h == h) && (p->key == NULL)) { _zend_hash_del_el_ex(ht, idx, p, prev); return SUCCESS; @@ -1144,7 +1150,7 @@ ZEND_API void zend_hash_destroy(HashTable *ht) } else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) { return; } - pefree(ht->arData, ht->u.flags & HASH_FLAG_PERSISTENT); + pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT); } ZEND_API void zend_array_destroy(HashTable *ht) @@ -1185,7 +1191,7 @@ ZEND_API void zend_array_destroy(HashTable *ht) } else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) { goto free_ht; } - 
efree(ht->arData); + efree(HT_GET_DATA_ADDR(ht)); free_ht: FREE_HASHTABLE(ht); } @@ -1229,13 +1235,13 @@ ZEND_API void zend_hash_clean(HashTable *ht) } } if (!(ht->u.flags & HASH_FLAG_PACKED)) { - memset(ht->arHash, INVALID_IDX, ht->nTableSize * sizeof(uint32_t)); + HT_HASH_RESET(ht); } } ht->nNumUsed = 0; ht->nNumOfElements = 0; ht->nNextFreeElement = 0; - ht->nInternalPointer = INVALID_IDX; + ht->nInternalPointer = HT_INVALID_IDX; } ZEND_API void zend_symtable_clean(HashTable *ht) @@ -1257,13 +1263,13 @@ ZEND_API void zend_symtable_clean(HashTable *ht) } } while (++p != end); if (!(ht->u.flags & HASH_FLAG_PACKED)) { - memset(ht->arHash, INVALID_IDX, ht->nTableSize * sizeof(uint32_t)); + HT_HASH_RESET(ht); } } ht->nNumUsed = 0; ht->nNumOfElements = 0; ht->nNextFreeElement = 0; - ht->nInternalPointer = INVALID_IDX; + ht->nInternalPointer = HT_INVALID_IDX; } ZEND_API void zend_hash_graceful_destroy(HashTable *ht) @@ -1277,10 +1283,10 @@ ZEND_API void zend_hash_graceful_destroy(HashTable *ht) for (idx = 0; idx < ht->nNumUsed; idx++) { p = ht->arData + idx; if (Z_TYPE(p->val) == IS_UNDEF) continue; - _zend_hash_del_el(ht, idx, p); + _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p); } if (ht->u.flags & HASH_FLAG_INITIALIZED) { - pefree(ht->arData, ht->u.flags & HASH_FLAG_PERSISTENT); + pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT); } SET_INCONSISTENT(HT_DESTROYED); @@ -1299,11 +1305,11 @@ ZEND_API void zend_hash_graceful_reverse_destroy(HashTable *ht) idx--; p = ht->arData + idx; if (Z_TYPE(p->val) == IS_UNDEF) continue; - _zend_hash_del_el(ht, idx, p); + _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p); } if (ht->u.flags & HASH_FLAG_INITIALIZED) { - pefree(ht->arData, ht->u.flags & HASH_FLAG_PERSISTENT); + pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT); } SET_INCONSISTENT(HT_DESTROYED); @@ -1335,7 +1341,7 @@ ZEND_API void zend_hash_apply(HashTable *ht, apply_func_t apply_func) result = apply_func(&p->val); if (result & ZEND_HASH_APPLY_REMOVE) { - _zend_hash_del_el(ht, idx, p); + _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p); } if (result & ZEND_HASH_APPLY_STOP) { break; @@ -1362,7 +1368,7 @@ ZEND_API void zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t appl result = apply_func(&p->val, argument); if (result & ZEND_HASH_APPLY_REMOVE) { - _zend_hash_del_el(ht, idx, p); + _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p); } if (result & ZEND_HASH_APPLY_STOP) { break; @@ -1395,7 +1401,7 @@ ZEND_API void zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t ap result = apply_func(&p->val, num_args, args, &hash_key); if (result & ZEND_HASH_APPLY_REMOVE) { - _zend_hash_del_el(ht, idx, p); + _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p); } if (result & ZEND_HASH_APPLY_STOP) { va_end(args); @@ -1427,7 +1433,7 @@ ZEND_API void zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func) result = apply_func(&p->val); if (result & ZEND_HASH_APPLY_REMOVE) { - _zend_hash_del_el(ht, idx, p); + _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p); } if (result & ZEND_HASH_APPLY_STOP) { break; @@ -1448,13 +1454,13 @@ ZEND_API void zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_fun IS_CONSISTENT(target); HT_ASSERT(GC_REFCOUNT(target) == 1); - setTargetPointer = (target->nInternalPointer == INVALID_IDX); + setTargetPointer = (target->nInternalPointer == HT_INVALID_IDX); for (idx = 0; idx < source->nNumUsed; idx++) { p = source->arData + idx; if (Z_TYPE(p->val) == IS_UNDEF) continue; if (setTargetPointer && source->nInternalPointer == idx) { - 
target->nInternalPointer = INVALID_IDX; + target->nInternalPointer = HT_INVALID_IDX; } /* INDIRECT element may point to UNDEF-ined slots */ data = &p->val; @@ -1473,7 +1479,7 @@ ZEND_API void zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_fun pCopyConstructor(new_entry); } } - if (target->nInternalPointer == INVALID_IDX && target->nNumOfElements > 0) { + if (target->nInternalPointer == HT_INVALID_IDX && target->nNumOfElements > 0) { idx = 0; while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) { idx++; @@ -1500,7 +1506,7 @@ ZEND_API HashTable *zend_array_dup(HashTable *source) target->nTableMask = source->nTableMask; target->nTableSize = source->nTableSize; target->pDestructor = source->pDestructor; - target->nInternalPointer = INVALID_IDX; + target->nInternalPointer = HT_INVALID_IDX; target->u.flags = (source->u.flags & ~HASH_FLAG_PERSISTENT) | HASH_FLAG_APPLY_PROTECTION; target_idx = 0; @@ -1509,9 +1515,9 @@ ZEND_API HashTable *zend_array_dup(HashTable *source) target->nNumUsed = source->nNumUsed; target->nNumOfElements = source->nNumOfElements; target->nNextFreeElement = source->nNextFreeElement; - target->arData = (Bucket *) pemalloc(target->nTableSize * sizeof(Bucket), 0); - target->arHash = (uint32_t*)&uninitialized_bucket; + HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target))); target->nInternalPointer = source->nInternalPointer; + HT_HASH_RESET_PACKED(target); for (idx = 0; idx < source->nNumUsed; idx++) { p = source->arData + idx; @@ -1543,7 +1549,7 @@ ZEND_API HashTable *zend_array_dup(HashTable *source) } } if (target->nNumOfElements > 0 && - target->nInternalPointer == INVALID_IDX) { + target->nInternalPointer == HT_INVALID_IDX) { idx = 0; while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) { idx++; @@ -1552,9 +1558,8 @@ ZEND_API HashTable *zend_array_dup(HashTable *source) } } else { target->nNextFreeElement = source->nNextFreeElement; - target->arData = (Bucket *) pemalloc(target->nTableSize * (sizeof(Bucket) + sizeof(uint32_t)), 0); - target->arHash = (uint32_t*)(target->arData + target->nTableSize); - memset(target->arHash, INVALID_IDX, target->nTableSize * sizeof(uint32_t)); + HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target))); + HT_HASH_RESET(target); for (idx = 0; idx < source->nNumUsed; idx++) { p = source->arData + idx; @@ -1578,9 +1583,9 @@ ZEND_API HashTable *zend_array_dup(HashTable *source) if (q->key) { zend_string_addref(q->key); } - nIndex = q->h & target->nTableMask; - Z_NEXT(q->val) = target->arHash[nIndex]; - target->arHash[nIndex] = target_idx; + nIndex = q->h | target->nTableMask; + Z_NEXT(q->val) = HT_HASH(target, nIndex); + HT_HASH(target, nIndex) = HT_IDX_TO_HASH(target_idx); if (Z_OPT_REFCOUNTED_P(data)) { if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1) { ZVAL_COPY(&q->val, Z_REFVAL_P(data)); @@ -1595,7 +1600,7 @@ ZEND_API HashTable *zend_array_dup(HashTable *source) target->nNumUsed = target_idx; target->nNumOfElements = target_idx; if (target->nNumOfElements > 0 && - target->nInternalPointer == INVALID_IDX) { + target->nInternalPointer == HT_INVALID_IDX) { target->nInternalPointer = 0; } } @@ -1603,8 +1608,7 @@ ZEND_API HashTable *zend_array_dup(HashTable *source) target->nNumUsed = 0; target->nNumOfElements = 0; target->nNextFreeElement = 0; - target->arData = NULL; - target->arHash = (uint32_t*)&uninitialized_bucket; + HT_SET_DATA_ADDR(target, &uninitialized_bucket); } return target; } @@ -1790,7 +1794,7 @@ ZEND_API void zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *p return; } } - *pos = INVALID_IDX; + *pos = 
HT_INVALID_IDX; } @@ -1812,7 +1816,7 @@ ZEND_API void zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos return; } } - *pos = INVALID_IDX; + *pos = HT_INVALID_IDX; } @@ -1823,11 +1827,11 @@ ZEND_API int zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos) IS_CONSISTENT(ht); HT_ASSERT(ht->nInternalPointer != &pos || GC_REFCOUNT(ht) == 1); - if (idx != INVALID_IDX) { + if (idx != HT_INVALID_IDX) { while (1) { idx++; if (idx >= ht->nNumUsed) { - *pos = INVALID_IDX; + *pos = HT_INVALID_IDX; return SUCCESS; } if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) { @@ -1847,7 +1851,7 @@ ZEND_API int zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos) IS_CONSISTENT(ht); HT_ASSERT(ht->nInternalPointer != &pos || GC_REFCOUNT(ht) == 1); - if (idx != INVALID_IDX) { + if (idx != HT_INVALID_IDX) { while (idx > 0) { idx--; if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) { @@ -1855,7 +1859,7 @@ ZEND_API int zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos) return SUCCESS; } } - *pos = INVALID_IDX; + *pos = HT_INVALID_IDX; return SUCCESS; } else { return FAILURE; @@ -1870,7 +1874,7 @@ ZEND_API int zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str Bucket *p; IS_CONSISTENT(ht); - if (idx != INVALID_IDX) { + if (idx != HT_INVALID_IDX) { p = ht->arData + idx; if (p->key) { *str_index = p->key; @@ -1889,7 +1893,7 @@ ZEND_API void zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, Bucket *p; IS_CONSISTENT(ht); - if (idx == INVALID_IDX) { + if (idx == HT_INVALID_IDX) { ZVAL_NULL(key); } else { p = ht->arData + idx; @@ -1907,7 +1911,7 @@ ZEND_API int zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos) Bucket *p; IS_CONSISTENT(ht); - if (idx != INVALID_IDX) { + if (idx != HT_INVALID_IDX) { p = ht->arData + idx; if (p->key) { return HASH_KEY_IS_STRING; @@ -1925,7 +1929,7 @@ ZEND_API zval *zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos) Bucket *p; IS_CONSISTENT(ht); - if (idx != INVALID_IDX) { + if (idx != HT_INVALID_IDX) { p = ht->arData + idx; return &p->val; } else { @@ -2024,10 +2028,15 @@ ZEND_API int zend_hash_sort_ex(HashTable *ht, sort_func_t sort, compare_func_t c } } else { if (renumber) { + void *old_data = HT_GET_DATA_ADDR(ht); + Bucket *old_buckets = ht->arData; + ht->u.flags |= HASH_FLAG_PACKED; - ht->nTableMask = 0; - zend_hash_realloc(ht, ht->nTableSize * sizeof(Bucket)); - ht->arHash = (uint32_t*)&uninitialized_bucket; + ht->nTableMask = HT_MIN_MASK; + HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT & HASH_FLAG_PERSISTENT)); + memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed); + pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT & HASH_FLAG_PERSISTENT); + HT_HASH_RESET_PACKED(ht); } else { zend_hash_rehash(ht); } diff --git a/Zend/zend_hash.h b/Zend/zend_hash.h index 02991a643b..8767aee458 100644 --- a/Zend/zend_hash.h +++ b/Zend/zend_hash.h @@ -34,8 +34,6 @@ #define HASH_ADD_NEW (1<<3) #define HASH_ADD_NEXT (1<<4) -#define INVALID_IDX ((uint32_t) -1) - #define HASH_FLAG_PERSISTENT (1<<0) #define HASH_FLAG_APPLY_PROTECTION (1<<1) #define HASH_FLAG_PACKED (1<<2) @@ -820,9 +818,9 @@ static zend_always_inline void _zend_hash_append(HashTable *ht, zend_string *key ZVAL_COPY_VALUE(&p->val, zv); p->key = zend_string_copy(key); p->h = zend_string_hash_val(key); - nIndex = p->h & ht->nTableMask; - Z_NEXT(p->val) = ht->arHash[nIndex]; - ht->arHash[nIndex] = idx; + nIndex = p->h | ht->nTableMask; + Z_NEXT(p->val) = HT_HASH(ht, nIndex); + HT_HASH(ht, nIndex) = 
HT_IDX_TO_HASH(idx); ht->nNumUsed = idx + 1; ht->nNumOfElements++; } @@ -836,9 +834,9 @@ static zend_always_inline void _zend_hash_append_ptr(HashTable *ht, zend_string ZVAL_PTR(&p->val, ptr); p->key = zend_string_copy(key); p->h = zend_string_hash_val(key); - nIndex = p->h & ht->nTableMask; - Z_NEXT(p->val) = ht->arHash[nIndex]; - ht->arHash[nIndex] = idx; + nIndex = p->h | ht->nTableMask; + Z_NEXT(p->val) = HT_HASH(ht, nIndex); + HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx); ht->nNumUsed = idx + 1; ht->nNumOfElements++; } @@ -852,9 +850,9 @@ static zend_always_inline void _zend_hash_append_ind(HashTable *ht, zend_string ZVAL_INDIRECT(&p->val, ptr); p->key = zend_string_copy(key); p->h = zend_string_hash_val(key); - nIndex = p->h & ht->nTableMask; - Z_NEXT(p->val) = ht->arHash[nIndex]; - ht->arHash[nIndex] = idx; + nIndex = p->h | ht->nTableMask; + Z_NEXT(p->val) = HT_HASH(ht, nIndex); + HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx); ht->nNumUsed = idx + 1; ht->nNumOfElements++; } diff --git a/Zend/zend_string.c b/Zend/zend_string.c index 634e2c8104..f20304570a 100644 --- a/Zend/zend_string.c +++ b/Zend/zend_string.c @@ -50,10 +50,9 @@ void zend_interned_strings_init(void) zend_hash_init(&CG(interned_strings), 1024, NULL, _str_dtor, 1); - CG(interned_strings).nTableMask = CG(interned_strings).nTableSize - 1; - CG(interned_strings).arData = (Bucket*) pecalloc(CG(interned_strings).nTableSize, sizeof(Bucket), 1); - CG(interned_strings).arHash = (uint32_t*) pecalloc(CG(interned_strings).nTableSize, sizeof(uint32_t), 1); - memset(CG(interned_strings).arHash, INVALID_IDX, CG(interned_strings).nTableSize * sizeof(uint32_t)); + CG(interned_strings).nTableMask = -CG(interned_strings).nTableSize; + HT_SET_DATA_ADDR(&CG(interned_strings), pemalloc(HT_SIZE(&CG(interned_strings)), 1)); + HT_HASH_RESET(&CG(interned_strings)); /* interned empty string */ str = zend_string_alloc(sizeof("")-1, 1); @@ -89,10 +88,10 @@ static zend_string *zend_new_interned_string_int(zend_string *str) } h = zend_string_hash_val(str); - nIndex = h & CG(interned_strings).nTableMask; - idx = CG(interned_strings).arHash[nIndex]; - while (idx != INVALID_IDX) { - p = CG(interned_strings).arData + idx; + nIndex = h | CG(interned_strings).nTableMask; + idx = HT_HASH(&CG(interned_strings), nIndex); + while (idx != HT_INVALID_IDX) { + p = HT_HASH_TO_BUCKET(&CG(interned_strings), idx); if ((p->h == h) && (p->key->len == str->len)) { if (!memcmp(p->key->val, str->val, str->len)) { zend_string_release(str); @@ -107,18 +106,25 @@ static zend_string *zend_new_interned_string_int(zend_string *str) if (CG(interned_strings).nNumUsed >= CG(interned_strings).nTableSize) { if (CG(interned_strings).nTableSize < HT_MAX_SIZE) { /* Let's double the table size */ - Bucket *d = (Bucket *) perealloc_recoverable(CG(interned_strings).arData, (CG(interned_strings).nTableSize << 1) * sizeof(Bucket), 1); - uint32_t *h = (uint32_t *) perealloc_recoverable(CG(interned_strings).arHash, (CG(interned_strings).nTableSize << 1) * sizeof(uint32_t), 1); - - if (d && h) { - HANDLE_BLOCK_INTERRUPTIONS(); - CG(interned_strings).arData = d; - CG(interned_strings).arHash = h; - CG(interned_strings).nTableSize = (CG(interned_strings).nTableSize << 1); - CG(interned_strings).nTableMask = CG(interned_strings).nTableSize - 1; + void *new_data; + void *old_data = HT_GET_DATA_ADDR(&CG(interned_strings)); + Bucket *old_buckets = CG(interned_strings).arData; + + HANDLE_BLOCK_INTERRUPTIONS(); + CG(interned_strings).nTableSize += CG(interned_strings).nTableSize; + 
CG(interned_strings).nTableMask = -CG(interned_strings).nTableSize; + new_data = malloc(HT_SIZE(&CG(interned_strings))); + + if (new_data) { + HT_SET_DATA_ADDR(&CG(interned_strings), new_data); + memcpy(CG(interned_strings).arData, old_buckets, sizeof(Bucket) * CG(interned_strings).nNumUsed); + free(old_data); zend_hash_rehash(&CG(interned_strings)); - HANDLE_UNBLOCK_INTERRUPTIONS(); + } else { + CG(interned_strings).nTableSize = CG(interned_strings).nTableSize >> 1; + CG(interned_strings).nTableMask = -CG(interned_strings).nTableSize; } + HANDLE_UNBLOCK_INTERRUPTIONS(); } } @@ -131,9 +137,9 @@ static zend_string *zend_new_interned_string_int(zend_string *str) p->key = str; Z_STR(p->val) = str; Z_TYPE_INFO(p->val) = IS_INTERNED_STRING_EX; - nIndex = h & CG(interned_strings).nTableMask; - Z_NEXT(p->val) = CG(interned_strings).arHash[nIndex]; - CG(interned_strings).arHash[nIndex] = idx; + nIndex = h | CG(interned_strings).nTableMask; + Z_NEXT(p->val) = HT_HASH(&CG(interned_strings), nIndex); + HT_HASH(&CG(interned_strings), nIndex) = HT_IDX_TO_HASH(idx); HANDLE_UNBLOCK_INTERRUPTIONS(); @@ -178,15 +184,15 @@ static void zend_interned_strings_restore_int(void) GC_REFCOUNT(p->key) = 1; zend_string_free(p->key); - nIndex = p->h & CG(interned_strings).nTableMask; - if (CG(interned_strings).arHash[nIndex] == idx) { - CG(interned_strings).arHash[nIndex] = Z_NEXT(p->val); + nIndex = p->h | CG(interned_strings).nTableMask; + if (HT_HASH(&CG(interned_strings), nIndex) == HT_IDX_TO_HASH(idx)) { + HT_HASH(&CG(interned_strings), nIndex) = Z_NEXT(p->val); } else { - uint prev = CG(interned_strings).arHash[nIndex]; - while (Z_NEXT(CG(interned_strings).arData[prev].val) != idx) { - prev = Z_NEXT(CG(interned_strings).arData[prev].val); + uint32_t prev = HT_HASH(&CG(interned_strings), nIndex); + while (Z_NEXT(HT_HASH_TO_BUCKET(&CG(interned_strings), prev)->val) != idx) { + prev = Z_NEXT(HT_HASH_TO_BUCKET(&CG(interned_strings), prev)->val); } - Z_NEXT(CG(interned_strings).arData[prev].val) = Z_NEXT(p->val); + Z_NEXT(HT_HASH_TO_BUCKET(&CG(interned_strings), prev)->val) = Z_NEXT(p->val); } } #endif diff --git a/Zend/zend_types.h b/Zend/zend_types.h index 33fb78b6b9..09560943e5 100644 --- a/Zend/zend_types.h +++ b/Zend/zend_types.h @@ -173,27 +173,80 @@ struct _zend_array { } v; uint32_t flags; } u; - uint32_t nTableSize; uint32_t nTableMask; + Bucket *arData; uint32_t nNumUsed; uint32_t nNumOfElements; + uint32_t nTableSize; uint32_t nInternalPointer; zend_long nNextFreeElement; - Bucket *arData; - uint32_t *arHash; dtor_func_t pDestructor; }; +/* + * HashTable Data Layout + * ===================== + * + * +=============================+ + * | HT_HASH(ht, ht->nTableMask) | + * | ... | + * | HT_HASH(ht, -1) | + * +-----------------------------+ + * ht->arData ---> | Bucket[0] | + * | ... 
| + * | Bucket[ht->nTableSize-1] | + * +=============================+ + */ + +#define HT_INVALID_IDX ((uint32_t) -1) + +#define HT_MIN_MASK ((uint32_t) -2) #define HT_MIN_SIZE 8 #if SIZEOF_SIZE_T == 4 # define HT_MAX_SIZE 0x04000000 /* small enough to avoid overflow checks */ +# define HT_HASH_TO_BUCKET_EX(data, idx) \ + ((Bucket*)((char*)(data) + (idx))) +# define HT_IDX_TO_HASH(idx) \ + ((idx) * sizeof(Bucket)) #elif SIZEOF_SIZE_T == 8 # define HT_MAX_SIZE 0x80000000 +# define HT_HASH_TO_BUCKET_EX(data, idx) \ + ((data) + (idx)) +# define HT_IDX_TO_HASH(idx) \ + (idx) #else # error "Unknown SIZEOF_SIZE_T" #endif +#define HT_HASH_EX(data, idx) \ + ((uint32_t*)(data))[(int)(idx)] +#define HT_HASH(ht, idx) \ + HT_HASH_EX((ht)->arData, idx) + +#define HT_HASH_SIZE(ht) \ + ((-(int)(ht)->nTableMask) * sizeof(uint32_t)) +#define HT_DATA_SIZE(ht) \ + ((ht)->nTableSize * sizeof(Bucket)) +#define HT_SIZE(ht) \ + (HT_HASH_SIZE(ht) + HT_DATA_SIZE(ht)) +#define HT_USED_SIZE(ht) \ + (HT_HASH_SIZE(ht) + ((ht)->nNumUsed * sizeof(Bucket))) +#define HT_HASH_RESET(ht) \ + memset(&HT_HASH(ht, (ht)->nTableMask), HT_INVALID_IDX, HT_HASH_SIZE(ht)) +#define HT_HASH_RESET_PACKED(ht) do { \ + HT_HASH(ht, -2) = HT_INVALID_IDX; \ + HT_HASH(ht, -1) = HT_INVALID_IDX; \ + } while (0) +#define HT_HASH_TO_BUCKET(ht, idx) \ + HT_HASH_TO_BUCKET_EX((ht)->arData, idx) + +#define HT_SET_DATA_ADDR(ht, ptr) do { \ + (ht)->arData = (Bucket*)(((char*)(ptr)) + HT_HASH_SIZE(ht)); \ + } while (0) +#define HT_GET_DATA_ADDR(ht) \ + ((char*)((ht)->arData) - HT_HASH_SIZE(ht)) + typedef uint32_t HashPosition; typedef struct _HashTableIterator { diff --git a/Zend/zend_vm_def.h b/Zend/zend_vm_def.h index 5a1dece134..e456bf268c 100644 --- a/Zend/zend_vm_def.h +++ b/Zend/zend_vm_def.h @@ -5383,7 +5383,7 @@ ZEND_VM_HANDLER(78, ZEND_FE_FETCH_R, VAR, ANY) while (1) { pos++; if (pos >= fe_ht->nNumUsed) { - pos = INVALID_IDX; + pos = HT_INVALID_IDX; break; } p = fe_ht->arData + pos; @@ -5497,7 +5497,7 @@ ZEND_VM_HANDLER(126, ZEND_FE_FETCH_RW, VAR, ANY) while (1) { pos++; if (pos >= fe_ht->nNumUsed) { - pos = INVALID_IDX; + pos = HT_INVALID_IDX; break; } p = fe_ht->arData + pos; @@ -5561,7 +5561,7 @@ ZEND_VM_HANDLER(126, ZEND_FE_FETCH_RW, VAR, ANY) while (1) { pos++; if (pos >= fe_ht->nNumUsed) { - pos = INVALID_IDX; + pos = HT_INVALID_IDX; break; } p = fe_ht->arData + pos; diff --git a/Zend/zend_vm_execute.h b/Zend/zend_vm_execute.h index af085f6b27..c239e2c886 100644 --- a/Zend/zend_vm_execute.h +++ b/Zend/zend_vm_execute.h @@ -12961,7 +12961,7 @@ static ZEND_OPCODE_HANDLER_RET ZEND_FASTCALL ZEND_FE_FETCH_R_SPEC_VAR_HANDLER(ZE while (1) { pos++; if (pos >= fe_ht->nNumUsed) { - pos = INVALID_IDX; + pos = HT_INVALID_IDX; break; } p = fe_ht->arData + pos; @@ -13075,7 +13075,7 @@ static ZEND_OPCODE_HANDLER_RET ZEND_FASTCALL ZEND_FE_FETCH_RW_SPEC_VAR_HANDLER(Z while (1) { pos++; if (pos >= fe_ht->nNumUsed) { - pos = INVALID_IDX; + pos = HT_INVALID_IDX; break; } p = fe_ht->arData + pos; @@ -13139,7 +13139,7 @@ static ZEND_OPCODE_HANDLER_RET ZEND_FASTCALL ZEND_FE_FETCH_RW_SPEC_VAR_HANDLER(Z while (1) { pos++; if (pos >= fe_ht->nNumUsed) { - pos = INVALID_IDX; + pos = HT_INVALID_IDX; break; } p = fe_ht->arData + pos; diff --git a/ext/opcache/ZendAccelerator.c b/ext/opcache/ZendAccelerator.c index 7f4988fb3b..dc92bf7464 100644 --- a/ext/opcache/ZendAccelerator.c +++ b/ext/opcache/ZendAccelerator.c @@ -248,15 +248,15 @@ static void accel_interned_strings_restore_state(void) ZCSG(interned_strings).nNumUsed--; ZCSG(interned_strings).nNumOfElements--; - 
nIndex = p->h & ZCSG(interned_strings).nTableMask; - if (ZCSG(interned_strings).arHash[nIndex] == idx) { - ZCSG(interned_strings).arHash[nIndex] = Z_NEXT(p->val); + nIndex = p->h | ZCSG(interned_strings).nTableMask; + if (HT_HASH(&ZCSG(interned_strings), nIndex) == HT_IDX_TO_HASH(idx)) { + HT_HASH(&ZCSG(interned_strings), nIndex) = Z_NEXT(p->val); } else { - uint prev = ZCSG(interned_strings).arHash[nIndex]; - while (Z_NEXT(ZCSG(interned_strings).arData[prev].val) != idx) { - prev = Z_NEXT(ZCSG(interned_strings).arData[prev].val); + uint32_t prev = HT_HASH(&ZCSG(interned_strings), nIndex); + while (Z_NEXT(HT_HASH_TO_BUCKET(&ZCSG(interned_strings), prev)->val) != idx) { + prev = Z_NEXT(HT_HASH_TO_BUCKET(&ZCSG(interned_strings), prev)->val); } - Z_NEXT(ZCSG(interned_strings).arData[prev].val) = Z_NEXT(p->val); + Z_NEXT(HT_HASH_TO_BUCKET(&ZCSG(interned_strings), prev)->val) = Z_NEXT(p->val); } } } @@ -274,7 +274,7 @@ static zend_string *accel_find_interned_string(zend_string *str) zend_ulong h; uint nIndex; uint idx; - Bucket *p; + Bucket *arData, *p; if (IS_ACCEL_INTERNED(str)) { /* this is already an interned string */ @@ -282,12 +282,13 @@ static zend_string *accel_find_interned_string(zend_string *str) } h = zend_string_hash_val(str); - nIndex = h & ZCSG(interned_strings).nTableMask; + nIndex = h | ZCSG(interned_strings).nTableMask; /* check for existing interned string */ - idx = ZCSG(interned_strings).arHash[nIndex]; - while (idx != INVALID_IDX) { - p = ZCSG(interned_strings).arData + idx; + idx = HT_HASH(&ZCSG(interned_strings), nIndex); + arData = ZCSG(interned_strings).arData; + while (idx != HT_INVALID_IDX) { + p = HT_HASH_TO_BUCKET_EX(arData, idx); if ((p->h == h) && (p->key->len == str->len)) { if (!memcmp(p->key->val, str->val, str->len)) { return p->key; @@ -315,12 +316,12 @@ zend_string *accel_new_interned_string(zend_string *str) } h = zend_string_hash_val(str); - nIndex = h & ZCSG(interned_strings).nTableMask; + nIndex = h | ZCSG(interned_strings).nTableMask; /* check for existing interned string */ - idx = ZCSG(interned_strings).arHash[nIndex]; - while (idx != INVALID_IDX) { - p = ZCSG(interned_strings).arData + idx; + idx = HT_HASH(&ZCSG(interned_strings), nIndex); + while (idx != HT_INVALID_IDX) { + p = HT_HASH_TO_BUCKET(&ZCSG(interned_strings), idx); if ((p->h == h) && (p->key->len == str->len)) { if (!memcmp(p->key->val, str->val, str->len)) { zend_string_release(str); @@ -357,8 +358,8 @@ zend_string *accel_new_interned_string(zend_string *str) p->key->len = str->len; memcpy(p->key->val, str->val, str->len); ZVAL_INTERNED_STR(&p->val, p->key); - Z_NEXT(p->val) = ZCSG(interned_strings).arHash[nIndex]; - ZCSG(interned_strings).arHash[nIndex] = idx; + Z_NEXT(p->val) = HT_HASH(&ZCSG(interned_strings), nIndex); + HT_HASH(&ZCSG(interned_strings), nIndex) = HT_IDX_TO_HASH(idx); zend_string_release(str); return p->key; #else @@ -1941,20 +1942,20 @@ static void accel_fast_zval_dtor(zval *zvalue) static inline void zend_accel_fast_del_bucket(HashTable *ht, uint32_t idx, Bucket *p) { - uint32_t nIndex = p->h & ht->nTableMask; - uint32_t i = ht->arHash[nIndex]; + uint32_t nIndex = p->h | ht->nTableMask; + uint32_t i = HT_HASH(ht, nIndex); ht->nNumUsed--; ht->nNumOfElements--; if (idx != i) { - Bucket *prev = ht->arData + i; + Bucket *prev = HT_HASH_TO_BUCKET(ht, i); while (Z_NEXT(prev->val) != idx) { i = Z_NEXT(prev->val); - prev = ht->arData + i; + prev = HT_HASH_TO_BUCKET(ht, i); } Z_NEXT(prev->val) = Z_NEXT(p->val); } else { - ht->arHash[p->h & ht->nTableMask] = Z_NEXT(p->val); + 
HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val); } } @@ -1990,7 +1991,7 @@ static void zend_accel_fast_shutdown(void) } } } - zend_accel_fast_del_bucket(EG(function_table), _idx-1, _p); + zend_accel_fast_del_bucket(EG(function_table), HT_IDX_TO_HASH(_idx-1), _p); } } ZEND_HASH_FOREACH_END(); @@ -2025,7 +2026,7 @@ static void zend_accel_fast_shutdown(void) } ce->static_members_table = NULL; } - zend_accel_fast_del_bucket(EG(class_table), _idx-1, _p); + zend_accel_fast_del_bucket(EG(class_table), HT_IDX_TO_HASH(_idx-1), _p); } } ZEND_HASH_FOREACH_END(); @@ -2035,7 +2036,7 @@ static void zend_accel_fast_shutdown(void) if (c->flags & CONST_PERSISTENT) { break; } else { - zend_accel_fast_del_bucket(EG(zend_constants), _idx-1, _p); + zend_accel_fast_del_bucket(EG(zend_constants), HT_IDX_TO_HASH(_idx-1), _p); } } ZEND_HASH_FOREACH_END(); } @@ -2150,15 +2151,17 @@ static int zend_accel_init_shm(void) # ifndef ZTS zend_hash_init(&ZCSG(interned_strings), (ZCG(accel_directives).interned_strings_buffer * 1024 * 1024) / (sizeof(Bucket) + sizeof(Bucket*) + 8 /* average string length */), NULL, NULL, 1); if (ZCG(accel_directives).interned_strings_buffer) { - ZCSG(interned_strings).nTableMask = ZCSG(interned_strings).nTableSize - 1; - ZCSG(interned_strings).arData = zend_shared_alloc(ZCSG(interned_strings).nTableSize * sizeof(Bucket)); - ZCSG(interned_strings).arHash = (uint32_t*)zend_shared_alloc(ZCSG(interned_strings).nTableSize * sizeof(uint32_t)); + void *data; + + ZCSG(interned_strings).nTableMask = -ZCSG(interned_strings).nTableSize; + data = zend_shared_alloc(HT_SIZE(&ZCSG(interned_strings))); ZCSG(interned_strings_start) = zend_shared_alloc((ZCG(accel_directives).interned_strings_buffer * 1024 * 1024)); - if (!ZCSG(interned_strings).arData || !ZCSG(interned_strings_start)) { + if (!data || !ZCSG(interned_strings_start)) { zend_accel_error(ACCEL_LOG_FATAL, ACCELERATOR_PRODUCT_NAME " cannot allocate buffer for interned strings"); return FAILURE; } - memset(ZCSG(interned_strings).arHash, INVALID_IDX, ZCSG(interned_strings).nTableSize * sizeof(uint32_t)); + HT_SET_DATA_ADDR(&ZCSG(interned_strings), data); + HT_HASH_RESET(&ZCSG(interned_strings)); ZCSG(interned_strings_end) = ZCSG(interned_strings_start) + (ZCG(accel_directives).interned_strings_buffer * 1024 * 1024); ZCSG(interned_strings_top) = ZCSG(interned_strings_start); diff --git a/ext/opcache/zend_accelerator_util_funcs.c b/ext/opcache/zend_accelerator_util_funcs.c index 8c89a8d5d6..8b301bf429 100644 --- a/ext/opcache/zend_accelerator_util_funcs.c +++ b/ext/opcache/zend_accelerator_util_funcs.c @@ -40,8 +40,6 @@ typedef int (*id_function_t)(void *, void *); typedef void (*unique_copy_ctor_func_t)(void *pElement); -static const uint32_t uninitialized_bucket = {INVALID_IDX}; - static void zend_hash_clone_zval(HashTable *ht, HashTable *source, int bind); static zend_ast *zend_ast_clone(zend_ast *ast); @@ -290,24 +288,22 @@ static void zend_hash_clone_zval(HashTable *ht, HashTable *source, int bind) ht->nNextFreeElement = source->nNextFreeElement; ht->pDestructor = ZVAL_PTR_DTOR; ht->u.flags = (source->u.flags & HASH_FLAG_INITIALIZED) | HASH_FLAG_APPLY_PROTECTION; - ht->arData = NULL; - ht->arHash = NULL; - ht->nInternalPointer = source->nNumOfElements ? 0 : INVALID_IDX; + ht->nInternalPointer = source->nNumOfElements ? 
0 : HT_INVALID_IDX; if (!(ht->u.flags & HASH_FLAG_INITIALIZED)) { - ht->arHash = (uint32_t*)&uninitialized_bucket; + ht->arData = source->arData; return; } if (source->u.flags & HASH_FLAG_PACKED) { ht->u.flags |= HASH_FLAG_PACKED; - ht->arData = (Bucket *) emalloc(ht->nTableSize * sizeof(Bucket)); - ht->arHash = (uint32_t*)&uninitialized_bucket; + HT_SET_DATA_ADDR(ht, (Bucket *) emalloc(HT_SIZE(ht))); + HT_HASH_RESET_PACKED(ht); for (idx = 0; idx < source->nNumUsed; idx++) { p = source->arData + idx; if (Z_TYPE(p->val) == IS_UNDEF) continue; - nIndex = p->h & ht->nTableMask; + nIndex = p->h | ht->nTableMask; r = ht->arData + ht->nNumUsed; q = ht->arData + p->h; @@ -326,19 +322,18 @@ static void zend_hash_clone_zval(HashTable *ht, HashTable *source, int bind) zend_clone_zval(&q->val, bind); } } else { - ht->arData = (Bucket *) emalloc(ht->nTableSize * (sizeof(Bucket) + sizeof(uint32_t))); - ht->arHash = (uint32_t*)(ht->arData + ht->nTableSize); - memset(ht->arHash, INVALID_IDX, sizeof(uint32_t) * ht->nTableSize); + HT_SET_DATA_ADDR(ht, emalloc(HT_SIZE(ht))); + HT_HASH_RESET(ht); for (idx = 0; idx < source->nNumUsed; idx++) { p = source->arData + idx; if (Z_TYPE(p->val) == IS_UNDEF) continue; - nIndex = p->h & ht->nTableMask; + nIndex = p->h | ht->nTableMask; /* Insert into hash collision list */ q = ht->arData + ht->nNumUsed; - Z_NEXT(q->val) = ht->arHash[nIndex]; - ht->arHash[nIndex] = ht->nNumUsed++; + Z_NEXT(q->val) = HT_HASH(ht, nIndex); + HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(ht->nNumUsed++); /* Initialize key */ q->h = p->h; @@ -369,28 +364,27 @@ static void zend_hash_clone_methods(HashTable *ht, HashTable *source, zend_class ht->nNextFreeElement = source->nNextFreeElement; ht->pDestructor = ZEND_FUNCTION_DTOR; ht->u.flags = (source->u.flags & HASH_FLAG_INITIALIZED); - ht->nInternalPointer = source->nNumOfElements ? 0 : INVALID_IDX; + ht->nInternalPointer = source->nNumOfElements ? 0 : HT_INVALID_IDX; if (!(ht->u.flags & HASH_FLAG_INITIALIZED)) { - ht->arHash = (uint32_t*)&uninitialized_bucket; + ht->arData = source->arData; return; } ZEND_ASSERT(!(source->u.flags & HASH_FLAG_PACKED)); - ht->arData = (Bucket *) emalloc(ht->nTableSize * (sizeof(Bucket) + sizeof(uint32_t))); - ht->arHash = (uint32_t *)(ht->arData + ht->nTableSize); - memset(ht->arHash, INVALID_IDX, sizeof(uint32_t) * ht->nTableSize); + HT_SET_DATA_ADDR(ht, emalloc(HT_SIZE(ht))); + HT_HASH_RESET(ht); for (idx = 0; idx < source->nNumUsed; idx++) { p = source->arData + idx; if (Z_TYPE(p->val) == IS_UNDEF) continue; - nIndex = p->h & ht->nTableMask; + nIndex = p->h | ht->nTableMask; /* Insert into hash collision list */ q = ht->arData + ht->nNumUsed; - Z_NEXT(q->val) = ht->arHash[nIndex]; - ht->arHash[nIndex] = ht->nNumUsed++; + Z_NEXT(q->val) = HT_HASH(ht, nIndex); + HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(ht->nNumUsed++); /* Initialize key */ q->h = p->h; @@ -428,28 +422,27 @@ static void zend_hash_clone_prop_info(HashTable *ht, HashTable *source, zend_cla ht->nNextFreeElement = source->nNextFreeElement; ht->pDestructor = NULL; ht->u.flags = (source->u.flags & HASH_FLAG_INITIALIZED); - ht->nInternalPointer = source->nNumOfElements ? 0 : INVALID_IDX; + ht->nInternalPointer = source->nNumOfElements ? 
0 : HT_INVALID_IDX; if (!(ht->u.flags & HASH_FLAG_INITIALIZED)) { - ht->arHash = (uint32_t*)&uninitialized_bucket; + ht->arData = source->arData; return; } ZEND_ASSERT(!(source->u.flags & HASH_FLAG_PACKED)); - ht->arData = (Bucket *) emalloc(ht->nTableSize * (sizeof(Bucket) + sizeof(uint32_t))); - ht->arHash = (uint32_t*)(ht->arData + ht->nTableSize); - memset(ht->arHash, INVALID_IDX, sizeof(uint32_t) * ht->nTableSize); + HT_SET_DATA_ADDR(ht, emalloc(HT_SIZE(ht))); + HT_HASH_RESET(ht); for (idx = 0; idx < source->nNumUsed; idx++) { p = source->arData + idx; if (Z_TYPE(p->val) == IS_UNDEF) continue; - nIndex = p->h & ht->nTableMask; + nIndex = p->h | ht->nTableMask; /* Insert into hash collision list */ q = ht->arData + ht->nNumUsed; - Z_NEXT(q->val) = ht->arHash[nIndex]; - ht->arHash[nIndex] = ht->nNumUsed++; + Z_NEXT(q->val) = HT_HASH(ht, nIndex); + HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(ht->nNumUsed++); /* Initialize key */ q->h = p->h; @@ -666,7 +659,7 @@ static void zend_accel_function_hash_copy(HashTable *target, HashTable *source) } } } - target->nInternalPointer = target->nNumOfElements ? 0 : INVALID_IDX; + target->nInternalPointer = target->nNumOfElements ? 0 : HT_INVALID_IDX; return; failure: @@ -709,7 +702,7 @@ static void zend_accel_function_hash_copy_from_shm(HashTable *target, HashTable } Z_PTR_P(t) = ARENA_REALLOC(Z_PTR(p->val)); } - target->nInternalPointer = target->nNumOfElements ? 0 : INVALID_IDX; + target->nInternalPointer = target->nNumOfElements ? 0 : HT_INVALID_IDX; return; failure: @@ -754,7 +747,7 @@ static void zend_accel_class_hash_copy(HashTable *target, HashTable *source, uni pCopyConstructor(&Z_PTR_P(t)); } } - target->nInternalPointer = target->nNumOfElements ? 0 : INVALID_IDX; + target->nInternalPointer = target->nNumOfElements ? 
0 : HT_INVALID_IDX; return; failure: diff --git a/ext/opcache/zend_persist.c b/ext/opcache/zend_persist.c index 2564f88476..bb0680035b 100644 --- a/ext/opcache/zend_persist.c +++ b/ext/opcache/zend_persist.c @@ -67,7 +67,8 @@ typedef void (*zend_persist_func_t)(zval*); static void zend_persist_zval(zval *z); static void zend_persist_zval_const(zval *z); -static const uint32_t uninitialized_bucket = {INVALID_IDX}; +static const uint32_t uninitialized_bucket[-HT_MIN_MASK] = + {HT_INVALID_IDX, HT_INVALID_IDX}; static void zend_hash_persist(HashTable *ht, zend_persist_func_t pPersistElement) { @@ -75,23 +76,23 @@ static void zend_hash_persist(HashTable *ht, zend_persist_func_t pPersistElement Bucket *p; if (!(ht->u.flags & HASH_FLAG_INITIALIZED)) { - ht->arHash = (uint32_t*)&uninitialized_bucket; + HT_SET_DATA_ADDR(ht, &uninitialized_bucket); return; } if (ht->u.flags & HASH_FLAG_PACKED) { - zend_accel_store(ht->arData, sizeof(Bucket) * ht->nNumUsed); - ht->arHash = (uint32_t*)&uninitialized_bucket; + void *data = HT_GET_DATA_ADDR(ht); + zend_accel_store(data, HT_USED_SIZE(ht)); + HT_SET_DATA_ADDR(ht, data); } else { - Bucket *d = (Bucket*)ZCG(mem); - uint32_t *h = (uint32_t*)(d + ht->nNumUsed); + void *data = ZCG(mem); + void *old_data = HT_GET_DATA_ADDR(ht); - ZCG(mem) = (void*)(h + ht->nTableSize); - memcpy(d, ht->arData, sizeof(Bucket) * ht->nNumUsed); - memcpy(h, ht->arHash, sizeof(uint32_t) * ht->nTableSize); - efree(ht->arData); - ht->arData = d; - ht->arHash = h; + ZCG(mem) = (void*)((char*)data + HT_USED_SIZE(ht)); + memcpy(data, old_data, HT_USED_SIZE(ht)); + efree(old_data); + HT_SET_DATA_ADDR(ht, data); } + for (idx = 0; idx < ht->nNumUsed; idx++) { p = ht->arData + idx; if (Z_TYPE(p->val) == IS_UNDEF) continue; @@ -112,21 +113,17 @@ static void zend_hash_persist_immutable(HashTable *ht) Bucket *p; if (!(ht->u.flags & HASH_FLAG_INITIALIZED)) { - ht->arHash = (uint32_t*)&uninitialized_bucket; + HT_SET_DATA_ADDR(ht, &uninitialized_bucket); return; } if (ht->u.flags & HASH_FLAG_PACKED) { - ht->arData = zend_accel_memdup(ht->arData, sizeof(Bucket) * ht->nNumUsed); - ht->arHash = (uint32_t*)&uninitialized_bucket; + HT_SET_DATA_ADDR(ht, zend_accel_memdup(HT_GET_DATA_ADDR(ht), HT_USED_SIZE(ht))); } else { - Bucket *d = (Bucket*)ZCG(mem); - uint32_t *h = (uint32_t*)(d + ht->nNumUsed); - - ZCG(mem) = (void*)(h + ht->nTableSize); - memcpy(d, ht->arData, sizeof(Bucket) * ht->nNumUsed); - memcpy(h, ht->arHash, sizeof(uint32_t) * ht->nTableSize); - ht->arData = d; - ht->arHash = h; + void *data = ZCG(mem); + + ZCG(mem) = (void*)((char*)data + HT_USED_SIZE(ht)); + memcpy(data, HT_GET_DATA_ADDR(ht), HT_USED_SIZE(ht)); + HT_SET_DATA_ADDR(ht, data); } for (idx = 0; idx < ht->nNumUsed; idx++) { p = ht->arData + idx; diff --git a/ext/opcache/zend_persist_calc.c b/ext/opcache/zend_persist_calc.c index de48ab53d1..a3f8abd134 100644 --- a/ext/opcache/zend_persist_calc.c +++ b/ext/opcache/zend_persist_calc.c @@ -57,11 +57,8 @@ static void zend_hash_persist_calc(HashTable *ht, void (*pPersistElement)(zval * if (!(ht->u.flags & HASH_FLAG_INITIALIZED)) { return; } - if (ht->u.flags & HASH_FLAG_PACKED) { - ADD_SIZE(sizeof(Bucket) * ht->nNumUsed); - } else { - ADD_SIZE(sizeof(Bucket) * ht->nNumUsed + sizeof(uint32_t) * ht->nTableSize); - } + + ADD_SIZE(HT_USED_SIZE(ht)); for (idx = 0; idx < ht->nNumUsed; idx++) { p = ht->arData + idx; diff --git a/ext/spl/spl_array.c b/ext/spl/spl_array.c index 7c75511c24..b5b9129698 100644 --- a/ext/spl/spl_array.c +++ b/ext/spl/spl_array.c @@ -111,7 +111,7 @@ static void 
spl_array_rewind(spl_array_object *intern); static void spl_array_update_pos(HashTable *ht, spl_array_object* intern) /* {{{ */ { uint pos = intern->pos; - if (pos != INVALID_IDX) { + if (pos != HT_INVALID_IDX) { intern->pos_h = ht->arData[pos].h; } } /* }}} */ @@ -134,12 +134,14 @@ SPL_API int spl_hash_verify_pos_ex(spl_array_object * intern, HashTable * ht) /* return SUCCESS; } } else { - idx = ht->arHash[intern->pos_h & ht->nTableMask]; - while (idx != INVALID_IDX) { - if (idx == intern->pos) { + uint32_t pos = HT_IDX_TO_HASH(intern->pos); + + idx = HT_HASH(ht, intern->pos_h | ht->nTableMask); + while (idx != HT_INVALID_IDX) { + if (idx == pos) { return SUCCESS; } - idx = Z_NEXT(ht->arData[idx].val); + idx = Z_NEXT(HT_HASH_TO_BUCKET(ht, idx)->val); } } /* HASH_UNPROTECT_RECURSION(ht); */ @@ -714,7 +716,7 @@ static inline int spl_array_object_verify_pos_ex(spl_array_object *object, HashT return FAILURE; } - if (object->pos != INVALID_IDX && (object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, ht) == FAILURE) { + if (object->pos != HT_INVALID_IDX && (object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, ht) == FAILURE) { php_error_docref(NULL, E_NOTICE, "%sArray was modified outside object and internal position is no longer valid", msg_prefix); return FAILURE; } @@ -783,7 +785,7 @@ void spl_array_iterator_append(zval *object, zval *append_value) /* {{{ */ } spl_array_write_dimension(object, NULL, append_value); - if (intern->pos == INVALID_IDX) { + if (intern->pos == HT_INVALID_IDX) { if (aht->nNumUsed && !Z_ISUNDEF(aht->arData[aht->nNumUsed-1].val)) { spl_array_set_pos(intern, aht, aht->nNumUsed - 1); } @@ -1420,7 +1422,7 @@ int static spl_array_object_count_elements_helper(spl_array_object *intern, zend pos = intern->pos; *count = 0; spl_array_rewind(intern); - while(intern->pos != INVALID_IDX && spl_array_next(intern) == SUCCESS) { + while(intern->pos != HT_INVALID_IDX && spl_array_next(intern) == SUCCESS) { (*count)++; } spl_array_set_pos(intern, aht, pos); diff --git a/ext/spl/spl_observer.c b/ext/spl/spl_observer.c index 6316431635..ae35f8aec2 100644 --- a/ext/spl/spl_observer.c +++ b/ext/spl/spl_observer.c @@ -258,7 +258,7 @@ static zend_object *spl_object_storage_new_ex(zend_class_entry *class_type, zval intern = emalloc(sizeof(spl_SplObjectStorage) + zend_object_properties_size(parent)); memset(intern, 0, sizeof(spl_SplObjectStorage) - sizeof(zval)); - intern->pos = INVALID_IDX; + intern->pos = HT_INVALID_IDX; zend_object_std_init(&intern->std, class_type); object_properties_init(&intern->std, class_type); diff --git a/ext/standard/array.c b/ext/standard/array.c index 63bf5e95c5..9ed3671fcb 100644 --- a/ext/standard/array.c +++ b/ext/standard/array.c @@ -2103,7 +2103,6 @@ static void php_splice(HashTable *in_hash, int offset, int length, HashTable *re in_hash->nNumOfElements = out_hash.nNumOfElements; in_hash->nNextFreeElement = out_hash.nNextFreeElement; in_hash->arData = out_hash.arData; - in_hash->arHash = out_hash.arHash; in_hash->pDestructor = out_hash.pDestructor; zend_hash_internal_pointer_reset(in_hash); @@ -2383,7 +2382,6 @@ PHP_FUNCTION(array_unshift) Z_ARRVAL_P(stack)->nNumOfElements = new_hash.nNumOfElements; Z_ARRVAL_P(stack)->nNextFreeElement = new_hash.nNextFreeElement; Z_ARRVAL_P(stack)->arData = new_hash.arData; - Z_ARRVAL_P(stack)->arHash = new_hash.arHash; Z_ARRVAL_P(stack)->pDestructor = new_hash.pDestructor; zend_hash_internal_pointer_reset(Z_ARRVAL_P(stack)); |