Diffstat (limited to 'Zend/zend_hash.c')
-rw-r--r--  Zend/zend_hash.c  61
1 file changed, 20 insertions, 41 deletions
diff --git a/Zend/zend_hash.c b/Zend/zend_hash.c
index 5404187a97..b59d5885a6 100644
--- a/Zend/zend_hash.c
+++ b/Zend/zend_hash.c
@@ -92,7 +92,7 @@ static void _zend_is_inconsistent(const HashTable *ht, const char *file, int lin
static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
-static uint32_t zend_always_inline zend_hash_check_size(uint32_t nSize)
+static zend_always_inline uint32_t zend_hash_check_size(uint32_t nSize)
{
#if defined(ZEND_WIN32)
unsigned long index;
@@ -103,7 +103,7 @@ static uint32_t zend_always_inline zend_hash_check_size(uint32_t nSize)
if (nSize < HT_MIN_SIZE) {
nSize = HT_MIN_SIZE;
} else if (UNEXPECTED(nSize >= HT_MAX_SIZE)) {
- zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
+ zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
}
#if defined(ZEND_WIN32)
@@ -127,7 +127,7 @@ static uint32_t zend_always_inline zend_hash_check_size(uint32_t nSize)
#endif
}
-static void zend_always_inline zend_hash_real_init_ex(HashTable *ht, int packed)
+static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, int packed)
{
HT_ASSERT(GC_REFCOUNT(ht) == 1);
ZEND_ASSERT(!((ht)->u.flags & HASH_FLAG_INITIALIZED));
@@ -139,7 +139,7 @@ static void zend_always_inline zend_hash_real_init_ex(HashTable *ht, int packed)
(ht)->nTableMask = -(ht)->nTableSize;
HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
(ht)->u.flags |= HASH_FLAG_INITIALIZED;
- if (EXPECTED(ht->nTableMask == -8)) {
+ if (EXPECTED(ht->nTableMask == (uint32_t)-8)) {
Bucket *arData = ht->arData;
HT_HASH_EX(arData, -8) = -1;
@@ -156,7 +156,7 @@ static void zend_always_inline zend_hash_real_init_ex(HashTable *ht, int packed)
}
}
-static void zend_always_inline zend_hash_check_init(HashTable *ht, int packed)
+static zend_always_inline void zend_hash_check_init(HashTable *ht, int packed)
{
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
@@ -189,12 +189,10 @@ static void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
{
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (ht->nTableSize >= HT_MAX_SIZE) {
- zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
+ zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
}
- HANDLE_BLOCK_INTERRUPTIONS();
ht->nTableSize += ht->nTableSize;
HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
- HANDLE_UNBLOCK_INTERRUPTIONS();
}
ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, zend_bool packed)
@@ -211,7 +209,6 @@ ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
Bucket *old_buckets = ht->arData;
HT_ASSERT(GC_REFCOUNT(ht) == 1);
- HANDLE_BLOCK_INTERRUPTIONS();
ht->u.flags &= ~HASH_FLAG_PACKED;
new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, -ht->nTableSize), (ht)->u.flags & HASH_FLAG_PERSISTENT);
ht->nTableMask = -ht->nTableSize;
@@ -219,7 +216,6 @@ ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
zend_hash_rehash(ht);
- HANDLE_UNBLOCK_INTERRUPTIONS();
}
ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
@@ -228,7 +224,6 @@ ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
Bucket *old_buckets = ht->arData;
HT_ASSERT(GC_REFCOUNT(ht) == 1);
- HANDLE_BLOCK_INTERRUPTIONS();
new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (ht)->u.flags & HASH_FLAG_PERSISTENT);
ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
ht->nTableMask = HT_MIN_MASK;
@@ -236,7 +231,6 @@ ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
HT_HASH_RESET_PACKED(ht);
memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
- HANDLE_UNBLOCK_INTERRUPTIONS();
}
ZEND_API void ZEND_FASTCALL _zend_hash_init_ex(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent, zend_bool bApplyProtection ZEND_FILE_LINE_DC)
@@ -260,10 +254,8 @@ ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend
if (packed) {
ZEND_ASSERT(ht->u.flags & HASH_FLAG_PACKED);
if (nSize > ht->nTableSize) {
- HANDLE_BLOCK_INTERRUPTIONS();
ht->nTableSize = zend_hash_check_size(nSize);
HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
- HANDLE_UNBLOCK_INTERRUPTIONS();
}
} else {
ZEND_ASSERT(!(ht->u.flags & HASH_FLAG_PACKED));
@@ -271,7 +263,6 @@ ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend
void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
Bucket *old_buckets = ht->arData;
nSize = zend_hash_check_size(nSize);
- HANDLE_BLOCK_INTERRUPTIONS();
new_data = pemalloc(HT_SIZE_EX(nSize, -nSize), ht->u.flags & HASH_FLAG_PERSISTENT);
ht->nTableSize = nSize;
ht->nTableMask = -ht->nTableSize;
@@ -279,7 +270,6 @@ ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend
memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
zend_hash_rehash(ht);
- HANDLE_UNBLOCK_INTERRUPTIONS();
}
}
}
@@ -591,12 +581,10 @@ static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_s
data = Z_INDIRECT_P(data);
}
}
- HANDLE_BLOCK_INTERRUPTIONS();
if (ht->pDestructor) {
ht->pDestructor(data);
}
ZVAL_COPY_VALUE(data, pData);
- HANDLE_UNBLOCK_INTERRUPTIONS();
return data;
}
}
@@ -604,7 +592,6 @@ static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_s
ZEND_HASH_IF_FULL_DO_RESIZE(ht); /* If the Hash table is full, resize it */
add_to_hash:
- HANDLE_BLOCK_INTERRUPTIONS();
idx = ht->nNumUsed++;
ht->nNumOfElements++;
if (ht->nInternalPointer == HT_INVALID_IDX) {
@@ -623,7 +610,6 @@ add_to_hash:
nIndex = h | ht->nTableMask;
Z_NEXT(p->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
- HANDLE_UNBLOCK_INTERRUPTIONS();
return &p->val;
}
@@ -762,7 +748,6 @@ static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht,
}
add_to_packed:
- HANDLE_BLOCK_INTERRUPTIONS();
/* incremental initialization of empty Buckets */
if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) == (HASH_ADD_NEW|HASH_ADD_NEXT)) {
ht->nNumUsed = h + 1;
@@ -788,8 +773,6 @@ add_to_packed:
p->key = NULL;
ZVAL_COPY_VALUE(&p->val, pData);
- HANDLE_UNBLOCK_INTERRUPTIONS();
-
return &p->val;
convert_to_hash:
@@ -801,12 +784,10 @@ convert_to_hash:
return NULL;
}
ZEND_ASSERT(&p->val != pData);
- HANDLE_BLOCK_INTERRUPTIONS();
if (ht->pDestructor) {
ht->pDestructor(&p->val);
}
ZVAL_COPY_VALUE(&p->val, pData);
- HANDLE_UNBLOCK_INTERRUPTIONS();
if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
}
@@ -817,7 +798,6 @@ convert_to_hash:
ZEND_HASH_IF_FULL_DO_RESIZE(ht); /* If the Hash table is full, resize it */
add_to_hash:
- HANDLE_BLOCK_INTERRUPTIONS();
idx = ht->nNumUsed++;
ht->nNumOfElements++;
if (ht->nInternalPointer == HT_INVALID_IDX) {
@@ -834,7 +814,6 @@ add_to_hash:
ZVAL_COPY_VALUE(&p->val, pData);
Z_NEXT(p->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
- HANDLE_UNBLOCK_INTERRUPTIONS();
return &p->val;
}
@@ -876,15 +855,12 @@ static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
- HANDLE_BLOCK_INTERRUPTIONS();
zend_hash_rehash(ht);
- HANDLE_UNBLOCK_INTERRUPTIONS();
} else if (ht->nTableSize < HT_MAX_SIZE) { /* Let's double the table size */
void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
uint32_t nSize = ht->nTableSize + ht->nTableSize;
Bucket *old_buckets = ht->arData;
- HANDLE_BLOCK_INTERRUPTIONS();
new_data = pemalloc(HT_SIZE_EX(nSize, -nSize), ht->u.flags & HASH_FLAG_PERSISTENT);
ht->nTableSize = nSize;
ht->nTableMask = -ht->nTableSize;
@@ -892,9 +868,8 @@ static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
zend_hash_rehash(ht);
- HANDLE_UNBLOCK_INTERRUPTIONS();
} else {
- zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
+ zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
}
}
@@ -984,7 +959,6 @@ ZEND_API int ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
{
- HANDLE_BLOCK_INTERRUPTIONS();
if (!(ht->u.flags & HASH_FLAG_PACKED)) {
if (prev) {
Z_NEXT(prev->val) = Z_NEXT(p->val);
@@ -1027,7 +1001,6 @@ static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx,
} else {
ZVAL_UNDEF(&p->val);
}
- HANDLE_UNBLOCK_INTERRUPTIONS();
}
static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
@@ -1525,7 +1498,6 @@ ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_fu
int result;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
HASH_PROTECT_RECURSION(ht);
for (idx = 0; idx < ht->nNumUsed; idx++) {
@@ -1534,6 +1506,7 @@ ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_fu
result = apply_func(&p->val);
if (result & ZEND_HASH_APPLY_REMOVE) {
+ HT_ASSERT(GC_REFCOUNT(ht) == 1);
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
@@ -1551,7 +1524,6 @@ ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_f
int result;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
HASH_PROTECT_RECURSION(ht);
for (idx = 0; idx < ht->nNumUsed; idx++) {
@@ -1560,6 +1532,7 @@ ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_f
result = apply_func(&p->val, argument);
if (result & ZEND_HASH_APPLY_REMOVE) {
+ HT_ASSERT(GC_REFCOUNT(ht) == 1);
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
@@ -1579,7 +1552,6 @@ ZEND_API void ZEND_FASTCALL zend_hash_apply_with_arguments(HashTable *ht, apply_
int result;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
HASH_PROTECT_RECURSION(ht);
@@ -1593,6 +1565,7 @@ ZEND_API void ZEND_FASTCALL zend_hash_apply_with_arguments(HashTable *ht, apply_
result = apply_func(&p->val, num_args, args, &hash_key);
if (result & ZEND_HASH_APPLY_REMOVE) {
+ HT_ASSERT(GC_REFCOUNT(ht) == 1);
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
@@ -1613,7 +1586,6 @@ ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t
int result;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
HASH_PROTECT_RECURSION(ht);
idx = ht->nNumUsed;
@@ -1625,6 +1597,7 @@ ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t
result = apply_func(&p->val);
if (result & ZEND_HASH_APPLY_REMOVE) {
+ HT_ASSERT(GC_REFCOUNT(ht) == 1);
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
@@ -2038,6 +2011,15 @@ ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulon
return p ? &p->val : NULL;
}
+ZEND_API zval* ZEND_FASTCALL _zend_hash_index_find(const HashTable *ht, zend_ulong h)
+{
+ Bucket *p;
+
+ IS_CONSISTENT(ht);
+
+ p = zend_hash_index_find_bucket(ht, h);
+ return p ? &p->val : NULL;
+}
ZEND_API zend_bool ZEND_FASTCALL zend_hash_index_exists(const HashTable *ht, zend_ulong h)
{
@@ -2287,7 +2269,6 @@ ZEND_API int ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, co
(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
((ht->u.flags & HASH_FLAG_PACKED) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));
- HANDLE_BLOCK_INTERRUPTIONS();
ht->nNumUsed = i;
ht->nInternalPointer = 0;
@@ -2324,8 +2305,6 @@ ZEND_API int ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, co
}
}
- HANDLE_UNBLOCK_INTERRUPTIONS();
-
return SUCCESS;
}