author    | Dmitry Stogov <dmitry@zend.com> | 2015-03-13 17:13:19 +0300
committer | Dmitry Stogov <dmitry@zend.com> | 2015-03-13 17:13:19 +0300
commit    | 2b42d719084631d255ec7ebb6c2928b9339915c2 (patch)
tree      | 33321998e169cfa41435609895c0d6b379dcbdff /Zend/zend_hash.c
parent    | 0a4a11b73ae32b31810451d1f7e8719ca0a503db (diff)
download  | php-git-2b42d719084631d255ec7ebb6c2928b9339915c2.tar.gz
Changed HashTable layout:
Removed HashTable->arHash (reduces memory consumption). Hash slots are now accessed through the HT_HASH() macro.
Hash slots are allocated together with the Buckets (directly before them) and are laid out in reverse order from the HashTable->arData base address (see the comments in Zend/zend_types.h).
Indexes in the hash table and in collision-resolution chains (Z_NEXT) may be stored either as indices or as byte offsets, depending on the system (32- or 64-bit).
HashTable data fields are reordered so that the data most useful to zend_hash_find() stays in the same CPU cache line.
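The layout change is easiest to see in a small standalone model. The sketch below is illustrative only (DemoTable, DemoBucket and DEMO_HASH are made-up names; the real code uses the HT_SIZE()/HT_HASH()/HT_SET_DATA_ADDR() macros from Zend/zend_types.h and keeps the collision link in Z_NEXT(p->val)): the uint32_t hash slots share one allocation with the Buckets and sit immediately before arData, so they are addressed with the negative index produced by h | nTableMask, where nTableMask is -nTableSize.

```c
/* Simplified, hypothetical model of the new HashTable layout.
 * Not the Zend implementation; names are invented for the demo. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HT_INVALID_IDX ((uint32_t)-1)

typedef struct {
	uint64_t h;      /* stand-in for the real Bucket (hash + key + zval) */
	uint32_t next;   /* stand-in for Z_NEXT(p->val): collision chain link */
} DemoBucket;

typedef struct {
	DemoBucket *arData;  /* first Bucket; the hash slots sit right before it */
	uint32_t nTableSize; /* always a power of two */
	uint32_t nTableMask; /* (uint32_t)-nTableSize */
	uint32_t nNumUsed;
} DemoTable;

/* nIndex = h | nTableMask is negative when read as int32_t, so it indexes
 * the uint32_t slots stored immediately before arData */
#define DEMO_HASH(ht, nIndex) ((uint32_t*)((ht)->arData))[(int32_t)(nIndex)]

static void demo_init(DemoTable *ht, uint32_t size)
{
	size_t hash_bytes = size * sizeof(uint32_t);
	char *data = malloc(hash_bytes + size * sizeof(DemoBucket));
	ht->nTableSize = size;
	ht->nTableMask = (uint32_t)-(int32_t)size;
	ht->nNumUsed = 0;
	ht->arData = (DemoBucket*)(data + hash_bytes);  /* Buckets after the slots */
	memset(data, 0xFF, hash_bytes);                 /* every slot = HT_INVALID_IDX */
}

static void demo_insert(DemoTable *ht, uint64_t h)
{
	uint32_t idx = ht->nNumUsed++;                  /* no growth handling: demo only */
	uint32_t nIndex = (uint32_t)h | ht->nTableMask; /* replaces h & (size - 1) */
	ht->arData[idx].h = h;
	ht->arData[idx].next = DEMO_HASH(ht, nIndex);   /* prepend to the chain */
	DEMO_HASH(ht, nIndex) = idx;
}

static int demo_find(const DemoTable *ht, uint64_t h)
{
	uint32_t idx = DEMO_HASH(ht, (uint32_t)h | ht->nTableMask);
	while (idx != HT_INVALID_IDX) {
		if (ht->arData[idx].h == h) return 1;
		idx = ht->arData[idx].next;
	}
	return 0;
}

int main(void)
{
	DemoTable ht;
	demo_init(&ht, 8);
	demo_insert(&ht, 42);
	demo_insert(&ht, 42 + 8);   /* same slot: resolved through the next links */
	printf("find 42: %d, find 50: %d, find 7: %d\n",
	       demo_find(&ht, 42), demo_find(&ht, 50), demo_find(&ht, 7));
	free((char*)ht.arData - ht.nTableSize * sizeof(uint32_t));
	return 0;
}
```

Dropping the separate arHash pointer is where the memory saving in this commit comes from; the slot array itself still exists, it is just addressed relative to arData.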
Diffstat (limited to 'Zend/zend_hash.c')
-rw-r--r-- | Zend/zend_hash.c | 319
1 file changed, 164 insertions, 155 deletions
diff --git a/Zend/zend_hash.c b/Zend/zend_hash.c
index 8a9a032e16..681b8d0ec2 100644
--- a/Zend/zend_hash.c
+++ b/Zend/zend_hash.c
@@ -126,13 +126,13 @@ static void zend_always_inline zend_hash_check_init(HashTable *ht, int packed)
 	if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
 		if (packed) {
 			(ht)->u.flags |= HASH_FLAG_INITIALIZED | HASH_FLAG_PACKED;
-			(ht)->arData = (Bucket *) pemalloc((ht)->nTableSize * sizeof(Bucket), (ht)->u.flags & HASH_FLAG_PERSISTENT);
+			HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
+			HT_HASH_RESET_PACKED(ht);
 		} else {
 			(ht)->u.flags |= HASH_FLAG_INITIALIZED;
-			(ht)->nTableMask = (ht)->nTableSize - 1;
-			(ht)->arData = (Bucket *) pemalloc((ht)->nTableSize * (sizeof(Bucket) + sizeof(uint32_t)), (ht)->u.flags & HASH_FLAG_PERSISTENT);
-			(ht)->arHash = (uint32_t*)((ht)->arData + (ht)->nTableSize);
-			memset((ht)->arHash, INVALID_IDX, (ht)->nTableSize * sizeof(uint32_t));
+			(ht)->nTableMask = -(ht)->nTableSize;
+			HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
+			HT_HASH_RESET(ht);
 		}
 	}
 }
@@ -140,36 +140,22 @@ static void zend_always_inline zend_hash_check_init(HashTable *ht, int packed)
 #define CHECK_INIT(ht, packed) \
 	zend_hash_check_init(ht, packed)
 
-static const uint32_t uninitialized_bucket = {INVALID_IDX};
+static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
+	{HT_INVALID_IDX, HT_INVALID_IDX};
 
 ZEND_API void _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent ZEND_FILE_LINE_DC)
 {
 	GC_REFCOUNT(ht) = 1;
 	GC_TYPE_INFO(ht) = IS_ARRAY;
+	ht->u.flags = (persistent ? HASH_FLAG_PERSISTENT : 0) | HASH_FLAG_APPLY_PROTECTION;
 	ht->nTableSize = zend_hash_check_size(nSize);
-	ht->nTableMask = 0;
+	ht->nTableMask = HT_MIN_MASK;
+	HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
 	ht->nNumUsed = 0;
 	ht->nNumOfElements = 0;
+	ht->nInternalPointer = HT_INVALID_IDX;
 	ht->nNextFreeElement = 0;
-	ht->arData = NULL;
-	ht->arHash = (uint32_t*)&uninitialized_bucket;
 	ht->pDestructor = pDestructor;
-	ht->nInternalPointer = INVALID_IDX;
-	ht->u.flags = (persistent ? HASH_FLAG_PERSISTENT : 0) | HASH_FLAG_APPLY_PROTECTION;
-}
-
-static zend_always_inline void zend_hash_realloc(HashTable *ht, size_t new_size)
-{
-#if 1
-	if (!(ht->u.flags & HASH_FLAG_PERSISTENT) && new_size <= ZEND_MM_MAX_SMALL_SIZE) {
-		Bucket *newData = emalloc(new_size);
-		memcpy(newData, ht->arData, ht->nNumUsed * sizeof(Bucket));
-		efree(ht->arData);
-		ht->arData = newData;
-		return;
-	}
-#endif
-	ht->arData = (Bucket *) perealloc2(ht->arData, new_size, ht->nNumUsed * sizeof(Bucket), ht->u.flags & HASH_FLAG_PERSISTENT);
 }
 
 static void zend_hash_packed_grow(HashTable *ht)
@@ -180,7 +166,7 @@ static void zend_hash_packed_grow(HashTable *ht)
 	}
 	HANDLE_BLOCK_INTERRUPTIONS();
 	ht->nTableSize += ht->nTableSize;
-	zend_hash_realloc(ht, ht->nTableSize * sizeof(Bucket));
+	HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
 	HANDLE_UNBLOCK_INTERRUPTIONS();
 }
@@ -194,24 +180,33 @@ ZEND_API void zend_hash_real_init(HashTable *ht, zend_bool packed)
 
 ZEND_API void zend_hash_packed_to_hash(HashTable *ht)
 {
+	void *old_data = HT_GET_DATA_ADDR(ht);
+	Bucket *old_buckets = ht->arData;
+
 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
 	HANDLE_BLOCK_INTERRUPTIONS();
 	ht->u.flags &= ~HASH_FLAG_PACKED;
-	ht->nTableMask = ht->nTableSize - 1;
-	zend_hash_realloc(ht, ht->nTableSize * (sizeof(Bucket) + sizeof(uint32_t)));
-	ht->arHash = (uint32_t*)(ht->arData + ht->nTableSize);
+	ht->nTableMask = -ht->nTableSize;
+	HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
+	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
+	pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
 	zend_hash_rehash(ht);
 	HANDLE_UNBLOCK_INTERRUPTIONS();
 }
 
 ZEND_API void zend_hash_to_packed(HashTable *ht)
 {
+	void *old_data = HT_GET_DATA_ADDR(ht);
+	Bucket *old_buckets = ht->arData;
+
 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
 	HANDLE_BLOCK_INTERRUPTIONS();
 	ht->u.flags |= HASH_FLAG_PACKED;
-	ht->nTableMask = 0;
-	zend_hash_realloc(ht, ht->nTableSize * sizeof(Bucket));
-	ht->arHash = (uint32_t*)&uninitialized_bucket;
+	ht->nTableMask = HT_MIN_MASK;
+	HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
+	HT_HASH_RESET_PACKED(ht);
+	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
+	pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
 	HANDLE_UNBLOCK_INTERRUPTIONS();
 }
@@ -238,17 +233,21 @@ ZEND_API void zend_hash_extend(HashTable *ht, uint32_t nSize, zend_bool packed)
 		if (nSize > ht->nTableSize) {
 			HANDLE_BLOCK_INTERRUPTIONS();
 			ht->nTableSize = zend_hash_check_size(nSize);
-			zend_hash_realloc(ht, ht->nTableSize * sizeof(Bucket));
+			HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
 			HANDLE_UNBLOCK_INTERRUPTIONS();
 		}
 	} else {
 		ZEND_ASSERT(!(ht->u.flags & HASH_FLAG_PACKED));
 		if (nSize > ht->nTableSize) {
+			void *old_data = HT_GET_DATA_ADDR(ht);
+			Bucket *old_buckets = ht->arData;
+
 			HANDLE_BLOCK_INTERRUPTIONS();
 			ht->nTableSize = zend_hash_check_size(nSize);
-			zend_hash_realloc(ht, ht->nTableSize * (sizeof(Bucket) + sizeof(uint32_t)));
-			ht->arHash = (uint32_t*)(ht->arData + ht->nTableSize);
-			ht->nTableMask = ht->nTableSize - 1;
+			ht->nTableMask = -ht->nTableSize;
+			HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
+			memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
+			pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
 			zend_hash_rehash(ht);
 			HANDLE_UNBLOCK_INTERRUPTIONS();
 		}
@@ -307,8 +306,8 @@ ZEND_API HashPosition zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
 	HashTableIterator *iter = EG(ht_iterators) + idx;
 
 	ZEND_ASSERT(idx != (uint32_t)-1);
-	if (iter->pos == INVALID_IDX) {
-		return INVALID_IDX;
+	if (iter->pos == HT_INVALID_IDX) {
+		return HT_INVALID_IDX;
 	} else if (UNEXPECTED(iter->ht != ht)) {
 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
 			iter->ht->u.v.nIteratorsCount--;
@@ -372,7 +371,7 @@ ZEND_API HashPosition zend_hash_iterators_lower_pos(HashTable *ht, HashPosition
 {
 	HashTableIterator *iter = EG(ht_iterators);
 	HashTableIterator *end = iter + EG(ht_iterators_used);
-	HashPosition res = INVALID_IDX;
+	HashPosition res = HT_INVALID_IDX;
 
 	while (iter != end) {
 		if (iter->ht == ht) {
@@ -403,18 +402,19 @@ static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, zen
 	zend_ulong h;
 	uint32_t nIndex;
 	uint32_t idx;
-	Bucket *p;
+	Bucket *p, *arData;
 
 	h = zend_string_hash_val(key);
-	nIndex = h & ht->nTableMask;
-	idx = ht->arHash[nIndex];
-	while (idx != INVALID_IDX) {
-		p = ht->arData + idx;
-		if ((p->key == key) || /* check for the the same interned string */
-		    (p->h == h &&
-		     p->key &&
-		     p->key->len == key->len &&
-		     memcmp(p->key->val, key->val, key->len) == 0)) {
+	arData = ht->arData;
+	nIndex = h | ht->nTableMask;
+	idx = HT_HASH_EX(arData, nIndex);
+	while (idx != HT_INVALID_IDX) {
+		p = HT_HASH_TO_BUCKET_EX(arData, idx);
+		if (p->key == key || /* check for the the same interned string */
+		    (p->h == h &&
+		     p->key &&
+		     p->key->len == key->len &&
+		     memcmp(p->key->val, key->val, key->len) == 0)) {
 			return p;
 		}
 		idx = Z_NEXT(p->val);
@@ -426,13 +426,14 @@ static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht,
 {
 	uint32_t nIndex;
 	uint32_t idx;
-	Bucket *p;
-
-	nIndex = h & ht->nTableMask;
-	idx = ht->arHash[nIndex];
-	while (idx != INVALID_IDX) {
-		ZEND_ASSERT(idx < ht->nTableSize);
-		p = ht->arData + idx;
+	Bucket *p, *arData;
+
+	arData = ht->arData;
+	nIndex = h | ht->nTableMask;
+	idx = HT_HASH_EX(arData, nIndex);
+	while (idx != HT_INVALID_IDX) {
+		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
+		p = HT_HASH_TO_BUCKET_EX(arData, idx);
 		if ((p->h == h)
 		 && p->key
 		 && (p->key->len == len)
@@ -448,13 +449,14 @@ static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *h
 {
 	uint32_t nIndex;
 	uint32_t idx;
-	Bucket *p;
-
-	nIndex = h & ht->nTableMask;
-	idx = ht->arHash[nIndex];
-	while (idx != INVALID_IDX) {
-		ZEND_ASSERT(idx < ht->nTableSize);
-		p = ht->arData + idx;
+	Bucket *p, *arData;
+
+	arData = ht->arData;
+	nIndex = h | ht->nTableMask;
+	idx = HT_HASH_EX(arData, nIndex);
+	while (idx != HT_INVALID_IDX) {
+		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
+		p = HT_HASH_TO_BUCKET_EX(arData, idx);
 		if (p->h == h && !p->key) {
 			return p;
 		}
@@ -508,18 +510,18 @@ add_to_hash:
 	HANDLE_BLOCK_INTERRUPTIONS();
 	idx = ht->nNumUsed++;
 	ht->nNumOfElements++;
-	if (ht->nInternalPointer == INVALID_IDX) {
+	if (ht->nInternalPointer == HT_INVALID_IDX) {
 		ht->nInternalPointer = idx;
 	}
-	zend_hash_iterators_update(ht, INVALID_IDX, idx);
+	zend_hash_iterators_update(ht, HT_INVALID_IDX, idx);
 	p = ht->arData + idx;
 	p->h = h = zend_string_hash_val(key);
 	p->key = key;
 	zend_string_addref(key);
 	ZVAL_COPY_VALUE(&p->val, pData);
-	nIndex = h & ht->nTableMask;
-	Z_NEXT(p->val) = ht->arHash[nIndex];
-	ht->arHash[nIndex] = idx;
+	nIndex = h | ht->nTableMask;
+	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
+	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
 	HANDLE_UNBLOCK_INTERRUPTIONS();
 
 	return &p->val;
@@ -677,10 +679,10 @@ add_to_packed:
 		ht->nNumUsed = h + 1;
 	}
 	ht->nNumOfElements++;
-	if (ht->nInternalPointer == INVALID_IDX) {
+	if (ht->nInternalPointer == HT_INVALID_IDX) {
 		ht->nInternalPointer = h;
 	}
-	zend_hash_iterators_update(ht, INVALID_IDX, h);
+	zend_hash_iterators_update(ht, HT_INVALID_IDX, h);
 	if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
 		ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
 	}
@@ -720,20 +722,20 @@ add_to_hash:
 	HANDLE_BLOCK_INTERRUPTIONS();
 	idx = ht->nNumUsed++;
 	ht->nNumOfElements++;
-	if (ht->nInternalPointer == INVALID_IDX) {
+	if (ht->nInternalPointer == HT_INVALID_IDX) {
 		ht->nInternalPointer = idx;
 	}
-	zend_hash_iterators_update(ht, INVALID_IDX, idx);
+	zend_hash_iterators_update(ht, HT_INVALID_IDX, idx);
 	if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
 		ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
 	}
 	p = ht->arData + idx;
 	p->h = h;
 	p->key = NULL;
-	nIndex = h & ht->nTableMask;
+	nIndex = h | ht->nTableMask;
 	ZVAL_COPY_VALUE(&p->val, pData);
-	Z_NEXT(p->val) = ht->arHash[nIndex];
-	ht->arHash[nIndex] = idx;
+	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
+	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
 	HANDLE_UNBLOCK_INTERRUPTIONS();
 
 	return &p->val;
@@ -780,11 +782,15 @@ static void zend_hash_do_resize(HashTable *ht)
 		zend_hash_rehash(ht);
 		HANDLE_UNBLOCK_INTERRUPTIONS();
 	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
+		void *old_data = HT_GET_DATA_ADDR(ht);
+		Bucket *old_buckets = ht->arData;
+
 		HANDLE_BLOCK_INTERRUPTIONS();
 		ht->nTableSize += ht->nTableSize;
-		zend_hash_realloc(ht, ht->nTableSize * (sizeof(Bucket) + sizeof(uint32_t)));
-		ht->arHash = (uint32_t*)(ht->arData + ht->nTableSize);
-		ht->nTableMask = ht->nTableSize - 1;
+		ht->nTableMask = -ht->nTableSize;
+		HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
+		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
+		pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
 		zend_hash_rehash(ht);
 		HANDLE_UNBLOCK_INTERRUPTIONS();
 	} else {
@@ -801,12 +807,12 @@ ZEND_API int zend_hash_rehash(HashTable *ht)
 
 	if (UNEXPECTED(ht->nNumOfElements == 0)) {
 		if (ht->u.flags & HASH_FLAG_INITIALIZED) {
-			memset(ht->arHash, INVALID_IDX, ht->nTableSize * sizeof(uint32_t));
+			HT_HASH_RESET(ht);
 		}
 		return SUCCESS;
 	}
 
-	memset(ht->arHash, INVALID_IDX, ht->nTableSize * sizeof(uint32_t));
+	HT_HASH_RESET(ht);
 	if (EXPECTED(ht->u.v.nIteratorsCount == 0)) {
 		for (i = 0, j = 0; i < ht->nNumUsed; i++) {
 			p = ht->arData + i;
@@ -817,9 +823,9 @@ ZEND_API int zend_hash_rehash(HashTable *ht)
 					ht->nInternalPointer = j;
 				}
 			}
-			nIndex = ht->arData[j].h & ht->nTableMask;
-			Z_NEXT(ht->arData[j].val) = ht->arHash[nIndex];
-			ht->arHash[nIndex] = j;
+			nIndex = ht->arData[j].h | ht->nTableMask;
+			Z_NEXT(ht->arData[j].val) = HT_HASH(ht, nIndex);
+			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
 			j++;
 		}
 	} else {
@@ -838,9 +844,9 @@ ZEND_API int zend_hash_rehash(HashTable *ht)
 					iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
 				}
 			}
-			nIndex = ht->arData[j].h & ht->nTableMask;
-			Z_NEXT(ht->arData[j].val) = ht->arHash[nIndex];
-			ht->arHash[nIndex] = j;
+			nIndex = ht->arData[j].h | ht->nTableMask;
+			Z_NEXT(ht->arData[j].val) = HT_HASH(ht, nIndex);
+			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
 			j++;
 		}
 	}
@@ -855,7 +861,7 @@ static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx,
 		if (prev) {
 			Z_NEXT(prev->val) = Z_NEXT(p->val);
 		} else {
-			ht->arHash[p->h & ht->nTableMask] = Z_NEXT(p->val);
+			HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
 		}
 	}
 	if (ht->nNumUsed - 1 == idx) {
@@ -870,7 +876,7 @@ static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx,
 			while (1) {
 				new_idx++;
 				if (new_idx >= ht->nNumUsed) {
-					new_idx = INVALID_IDX;
+					new_idx = HT_INVALID_IDX;
 					break;
 				} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
 					break;
@@ -900,14 +906,14 @@ static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bu
 	Bucket *prev = NULL;
 
 	if (!(ht->u.flags & HASH_FLAG_PACKED)) {
-		uint32_t nIndex = p->h & ht->nTableMask;
-		uint32_t i = ht->arHash[nIndex];
+		uint32_t nIndex = p->h | ht->nTableMask;
+		uint32_t i = HT_HASH(ht, nIndex);
 
 		if (i != idx) {
-			prev = ht->arData + i;
+			prev = HT_HASH_TO_BUCKET(ht, i);
 			while (Z_NEXT(prev->val) != idx) {
 				i = Z_NEXT(prev->val);
-				prev = ht->arData + i;
+				prev = HT_HASH_TO_BUCKET(ht, i);
 			}
 		}
 	}
@@ -927,11 +933,11 @@ ZEND_API int zend_hash_del(HashTable *ht, zend_string *key)
 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
 
 	h = zend_string_hash_val(key);
-	nIndex = h & ht->nTableMask;
+	nIndex = h | ht->nTableMask;
 
-	idx = ht->arHash[nIndex];
-	while (idx != INVALID_IDX) {
-		p = ht->arData + idx;
+	idx = HT_HASH(ht, nIndex);
+	while (idx != HT_INVALID_IDX) {
+		p = HT_HASH_TO_BUCKET(ht, idx);
 		if ((p->key == key) ||
 			(p->h == h &&
 		     p->key &&
@@ -958,11 +964,11 @@ ZEND_API int zend_hash_del_ind(HashTable *ht, zend_string *key)
 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
 
 	h = zend_string_hash_val(key);
-	nIndex = h & ht->nTableMask;
+	nIndex = h | ht->nTableMask;
 
-	idx = ht->arHash[nIndex];
-	while (idx != INVALID_IDX) {
-		p = ht->arData + idx;
+	idx = HT_HASH(ht, nIndex);
+	while (idx != HT_INVALID_IDX) {
+		p = HT_HASH_TO_BUCKET(ht, idx);
 		if ((p->key == key) ||
 			(p->h == h &&
 		     p->key &&
@@ -1002,11 +1008,11 @@ ZEND_API int zend_hash_str_del(HashTable *ht, const char *str, size_t len)
 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
 
 	h = zend_inline_hash_func(str, len);
-	nIndex = h & ht->nTableMask;
+	nIndex = h | ht->nTableMask;
 
-	idx = ht->arHash[nIndex];
-	while (idx != INVALID_IDX) {
-		p = ht->arData + idx;
+	idx = HT_HASH(ht, nIndex);
+	while (idx != HT_INVALID_IDX) {
+		p = HT_HASH_TO_BUCKET(ht, idx);
 		if ((p->h == h)
 		 && p->key
 		 && (p->key->len == len)
@@ -1045,11 +1051,11 @@ ZEND_API int zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
 
 	h = zend_inline_hash_func(str, len);
-	nIndex = h & ht->nTableMask;
+	nIndex = h | ht->nTableMask;
 
-	idx = ht->arHash[nIndex];
-	while (idx != INVALID_IDX) {
-		p = ht->arData + idx;
+	idx = HT_HASH(ht, nIndex);
+	while (idx != HT_INVALID_IDX) {
+		p = HT_HASH_TO_BUCKET(ht, idx);
 		if ((p->h == h)
 		 && p->key
 		 && (p->key->len == len)
@@ -1083,11 +1089,11 @@ ZEND_API int zend_hash_index_del(HashTable *ht, zend_ulong h)
 		}
 		return FAILURE;
 	}
-	nIndex = h & ht->nTableMask;
+	nIndex = h | ht->nTableMask;
 
-	idx = ht->arHash[nIndex];
-	while (idx != INVALID_IDX) {
-		p = ht->arData + idx;
+	idx = HT_HASH(ht, nIndex);
+	while (idx != HT_INVALID_IDX) {
+		p = HT_HASH_TO_BUCKET(ht, idx);
 		if ((p->h == h) && (p->key == NULL)) {
 			_zend_hash_del_el_ex(ht, idx, p, prev);
 			return SUCCESS;
@@ -1144,7 +1150,7 @@ ZEND_API void zend_hash_destroy(HashTable *ht)
 	} else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
 		return;
 	}
-	pefree(ht->arData, ht->u.flags & HASH_FLAG_PERSISTENT);
+	pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
 }
 
 ZEND_API void zend_array_destroy(HashTable *ht)
@@ -1185,7 +1191,7 @@ ZEND_API void zend_array_destroy(HashTable *ht)
 	} else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
 		goto free_ht;
 	}
-	efree(ht->arData);
+	efree(HT_GET_DATA_ADDR(ht));
 free_ht:
 	FREE_HASHTABLE(ht);
 }
@@ -1229,13 +1235,13 @@ ZEND_API void zend_hash_clean(HashTable *ht)
 			}
 		}
 		if (!(ht->u.flags & HASH_FLAG_PACKED)) {
-			memset(ht->arHash, INVALID_IDX, ht->nTableSize * sizeof(uint32_t));
+			HT_HASH_RESET(ht);
 		}
 	}
 	ht->nNumUsed = 0;
 	ht->nNumOfElements = 0;
 	ht->nNextFreeElement = 0;
-	ht->nInternalPointer = INVALID_IDX;
+	ht->nInternalPointer = HT_INVALID_IDX;
 }
@@ -1257,13 +1263,13 @@ ZEND_API void zend_symtable_clean(HashTable *ht)
 			}
 		} while (++p != end);
 		if (!(ht->u.flags & HASH_FLAG_PACKED)) {
-			memset(ht->arHash, INVALID_IDX, ht->nTableSize * sizeof(uint32_t));
+			HT_HASH_RESET(ht);
 		}
 	}
 	ht->nNumUsed = 0;
 	ht->nNumOfElements = 0;
 	ht->nNextFreeElement = 0;
-	ht->nInternalPointer = INVALID_IDX;
+	ht->nInternalPointer = HT_INVALID_IDX;
 }
@@ -1277,10 +1283,10 @@ ZEND_API void zend_hash_graceful_destroy(HashTable *ht)
 	for (idx = 0; idx < ht->nNumUsed; idx++) {
 		p = ht->arData + idx;
 		if (Z_TYPE(p->val) == IS_UNDEF) continue;
-		_zend_hash_del_el(ht, idx, p);
+		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
 	}
 	if (ht->u.flags & HASH_FLAG_INITIALIZED) {
-		pefree(ht->arData, ht->u.flags & HASH_FLAG_PERSISTENT);
+		pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
 	}
 
 	SET_INCONSISTENT(HT_DESTROYED);
@@ -1299,11 +1305,11 @@ ZEND_API void zend_hash_graceful_reverse_destroy(HashTable *ht)
 		idx--;
 		p = ht->arData + idx;
 		if (Z_TYPE(p->val) == IS_UNDEF) continue;
-		_zend_hash_del_el(ht, idx, p);
+		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
 	}
 
 	if (ht->u.flags & HASH_FLAG_INITIALIZED) {
-		pefree(ht->arData, ht->u.flags & HASH_FLAG_PERSISTENT);
+		pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
 	}
 
 	SET_INCONSISTENT(HT_DESTROYED);
@@ -1335,7 +1341,7 @@ ZEND_API void zend_hash_apply(HashTable *ht, apply_func_t apply_func)
 		result = apply_func(&p->val);
 
 		if (result & ZEND_HASH_APPLY_REMOVE) {
-			_zend_hash_del_el(ht, idx, p);
+			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
 		}
 		if (result & ZEND_HASH_APPLY_STOP) {
 			break;
@@ -1362,7 +1368,7 @@ ZEND_API void zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t appl
 		result = apply_func(&p->val, argument);
 
 		if (result & ZEND_HASH_APPLY_REMOVE) {
-			_zend_hash_del_el(ht, idx, p);
+			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
 		}
 		if (result & ZEND_HASH_APPLY_STOP) {
 			break;
@@ -1395,7 +1401,7 @@ ZEND_API void zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t ap
 		result = apply_func(&p->val, num_args, args, &hash_key);
 
 		if (result & ZEND_HASH_APPLY_REMOVE) {
-			_zend_hash_del_el(ht, idx, p);
+			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
 		}
 		if (result & ZEND_HASH_APPLY_STOP) {
 			va_end(args);
@@ -1427,7 +1433,7 @@ ZEND_API void zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
 		result = apply_func(&p->val);
 
 		if (result & ZEND_HASH_APPLY_REMOVE) {
-			_zend_hash_del_el(ht, idx, p);
+			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
 		}
 		if (result & ZEND_HASH_APPLY_STOP) {
 			break;
@@ -1448,13 +1454,13 @@ ZEND_API void zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_fun
 	IS_CONSISTENT(target);
 	HT_ASSERT(GC_REFCOUNT(target) == 1);
 
-	setTargetPointer = (target->nInternalPointer == INVALID_IDX);
+	setTargetPointer = (target->nInternalPointer == HT_INVALID_IDX);
 	for (idx = 0; idx < source->nNumUsed; idx++) {
 		p = source->arData + idx;
 		if (Z_TYPE(p->val) == IS_UNDEF) continue;
 
 		if (setTargetPointer && source->nInternalPointer == idx) {
-			target->nInternalPointer = INVALID_IDX;
+			target->nInternalPointer = HT_INVALID_IDX;
 		}
 		/* INDIRECT element may point to UNDEF-ined slots */
 		data = &p->val;
@@ -1473,7 +1479,7 @@ ZEND_API void zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_fun
 			pCopyConstructor(new_entry);
 		}
 	}
-	if (target->nInternalPointer == INVALID_IDX && target->nNumOfElements > 0) {
+	if (target->nInternalPointer == HT_INVALID_IDX && target->nNumOfElements > 0) {
 		idx = 0;
 		while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
 			idx++;
@@ -1500,7 +1506,7 @@ ZEND_API HashTable *zend_array_dup(HashTable *source)
 	target->nTableMask = source->nTableMask;
 	target->nTableSize = source->nTableSize;
 	target->pDestructor = source->pDestructor;
-	target->nInternalPointer = INVALID_IDX;
+	target->nInternalPointer = HT_INVALID_IDX;
 	target->u.flags = (source->u.flags & ~HASH_FLAG_PERSISTENT) | HASH_FLAG_APPLY_PROTECTION;
 
 	target_idx = 0;
@@ -1509,9 +1515,9 @@ ZEND_API HashTable *zend_array_dup(HashTable *source)
 		target->nNumUsed = source->nNumUsed;
 		target->nNumOfElements = source->nNumOfElements;
 		target->nNextFreeElement = source->nNextFreeElement;
-		target->arData = (Bucket *) pemalloc(target->nTableSize * sizeof(Bucket), 0);
-		target->arHash = (uint32_t*)&uninitialized_bucket;
+		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
 		target->nInternalPointer = source->nInternalPointer;
+		HT_HASH_RESET_PACKED(target);
 
 		for (idx = 0; idx < source->nNumUsed; idx++) {
 			p = source->arData + idx;
@@ -1543,7 +1549,7 @@ ZEND_API HashTable *zend_array_dup(HashTable *source)
 			}
 		}
 		if (target->nNumOfElements > 0 &&
-		    target->nInternalPointer == INVALID_IDX) {
+		    target->nInternalPointer == HT_INVALID_IDX) {
 			idx = 0;
 			while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
 				idx++;
@@ -1552,9 +1558,8 @@ ZEND_API HashTable *zend_array_dup(HashTable *source)
 		}
 	} else {
 		target->nNextFreeElement = source->nNextFreeElement;
-		target->arData = (Bucket *) pemalloc(target->nTableSize * (sizeof(Bucket) + sizeof(uint32_t)), 0);
-		target->arHash = (uint32_t*)(target->arData + target->nTableSize);
-		memset(target->arHash, INVALID_IDX, target->nTableSize * sizeof(uint32_t));
+		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
+		HT_HASH_RESET(target);
 
 		for (idx = 0; idx < source->nNumUsed; idx++) {
 			p = source->arData + idx;
@@ -1578,9 +1583,9 @@ ZEND_API HashTable *zend_array_dup(HashTable *source)
 			if (q->key) {
 				zend_string_addref(q->key);
 			}
-			nIndex = q->h & target->nTableMask;
-			Z_NEXT(q->val) = target->arHash[nIndex];
-			target->arHash[nIndex] = target_idx;
+			nIndex = q->h | target->nTableMask;
+			Z_NEXT(q->val) = HT_HASH(target, nIndex);
+			HT_HASH(target, nIndex) = HT_IDX_TO_HASH(target_idx);
 			if (Z_OPT_REFCOUNTED_P(data)) {
 				if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1) {
 					ZVAL_COPY(&q->val, Z_REFVAL_P(data));
@@ -1595,7 +1600,7 @@ ZEND_API HashTable *zend_array_dup(HashTable *source)
 		target->nNumUsed = target_idx;
 		target->nNumOfElements = target_idx;
 		if (target->nNumOfElements > 0 &&
-		    target->nInternalPointer == INVALID_IDX) {
+		    target->nInternalPointer == HT_INVALID_IDX) {
 			target->nInternalPointer = 0;
 		}
 	}
@@ -1603,8 +1608,7 @@ ZEND_API HashTable *zend_array_dup(HashTable *source)
 		target->nNumUsed = 0;
 		target->nNumOfElements = 0;
 		target->nNextFreeElement = 0;
-		target->arData = NULL;
-		target->arHash = (uint32_t*)&uninitialized_bucket;
+		HT_SET_DATA_ADDR(target, &uninitialized_bucket);
 	}
 	return target;
 }
@@ -1790,7 +1794,7 @@ ZEND_API void zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *p
 			return;
 		}
 	}
-	*pos = INVALID_IDX;
+	*pos = HT_INVALID_IDX;
 }
@@ -1812,7 +1816,7 @@ ZEND_API void zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos
 			return;
 		}
 	}
-	*pos = INVALID_IDX;
+	*pos = HT_INVALID_IDX;
 }
@@ -1823,11 +1827,11 @@ ZEND_API int zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
 	IS_CONSISTENT(ht);
 	HT_ASSERT(ht->nInternalPointer != &pos || GC_REFCOUNT(ht) == 1);
 
-	if (idx != INVALID_IDX) {
+	if (idx != HT_INVALID_IDX) {
 		while (1) {
 			idx++;
 			if (idx >= ht->nNumUsed) {
-				*pos = INVALID_IDX;
+				*pos = HT_INVALID_IDX;
 				return SUCCESS;
 			}
 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
@@ -1847,7 +1851,7 @@ ZEND_API int zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
 	IS_CONSISTENT(ht);
 	HT_ASSERT(ht->nInternalPointer != &pos || GC_REFCOUNT(ht) == 1);
 
-	if (idx != INVALID_IDX) {
+	if (idx != HT_INVALID_IDX) {
 		while (idx > 0) {
 			idx--;
 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
@@ -1855,7 +1859,7 @@ ZEND_API int zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
 				return SUCCESS;
 			}
 		}
-		*pos = INVALID_IDX;
+		*pos = HT_INVALID_IDX;
 		return SUCCESS;
 	} else {
 		return FAILURE;
@@ -1870,7 +1874,7 @@ ZEND_API int zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str
 	Bucket *p;
 
 	IS_CONSISTENT(ht);
-	if (idx != INVALID_IDX) {
+	if (idx != HT_INVALID_IDX) {
 		p = ht->arData + idx;
 		if (p->key) {
 			*str_index = p->key;
@@ -1889,7 +1893,7 @@ ZEND_API void zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key,
 	Bucket *p;
 
 	IS_CONSISTENT(ht);
-	if (idx == INVALID_IDX) {
+	if (idx == HT_INVALID_IDX) {
 		ZVAL_NULL(key);
 	} else {
 		p = ht->arData + idx;
@@ -1907,7 +1911,7 @@ ZEND_API int zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
 	Bucket *p;
 
 	IS_CONSISTENT(ht);
-	if (idx != INVALID_IDX) {
+	if (idx != HT_INVALID_IDX) {
 		p = ht->arData + idx;
 		if (p->key) {
 			return HASH_KEY_IS_STRING;
@@ -1925,7 +1929,7 @@ ZEND_API zval *zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
 	Bucket *p;
 
 	IS_CONSISTENT(ht);
-	if (idx != INVALID_IDX) {
+	if (idx != HT_INVALID_IDX) {
 		p = ht->arData + idx;
 		return &p->val;
 	} else {
@@ -2024,10 +2028,15 @@ ZEND_API int zend_hash_sort_ex(HashTable *ht, sort_func_t sort, compare_func_t c
 		}
 	} else {
 		if (renumber) {
+			void *old_data = HT_GET_DATA_ADDR(ht);
+			Bucket *old_buckets = ht->arData;
+
 			ht->u.flags |= HASH_FLAG_PACKED;
-			ht->nTableMask = 0;
-			zend_hash_realloc(ht, ht->nTableSize * sizeof(Bucket));
-			ht->arHash = (uint32_t*)&uninitialized_bucket;
+			ht->nTableMask = HT_MIN_MASK;
+			HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT & HASH_FLAG_PERSISTENT));
+			memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
+			pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT & HASH_FLAG_PERSISTENT);
+			HT_HASH_RESET_PACKED(ht);
 		} else {
 			zend_hash_rehash(ht);
 		}
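For reference, replacing h & ht->nTableMask (old mask: nTableSize - 1) with h | ht->nTableMask (new mask: -nTableSize) selects the same slot for every hash value; the result is simply shifted into the negative range so it can be used as an offset below arData. A small self-contained check of that equivalence (illustrative only, not part of the commit):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t nTableSize = 8;                             /* always a power of two */
	const uint32_t old_mask = nTableSize - 1;                  /* pre-change nTableMask  */
	const uint32_t new_mask = (uint32_t)-(int32_t)nTableSize;  /* post-change nTableMask */

	for (uint32_t h = 0; h < 64; h++) {
		uint32_t old_index = h & old_mask;             /* 0 .. nTableSize-1  */
		int32_t  new_index = (int32_t)(h | new_mask);  /* -nTableSize .. -1  */
		/* same slot, counted backwards from arData instead of forwards from arHash */
		assert(new_index == (int32_t)old_index - (int32_t)nTableSize);
	}
	printf("old and new slot indexing agree for all hashes\n");
	return 0;
}
```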