author     Dmitry Stogov <dmitry@zend.com>   2015-12-16 04:59:05 +0300
committer  Dmitry Stogov <dmitry@zend.com>   2015-12-16 04:59:05 +0300
commit     9044f491cc0263aa8a8d4537beda43d0d832c94d (patch)
tree       6eb1c1098d1a92f9b3f134e0ebd7848248b75930
parent     c78ebcad53522fe5c7416a9f67c4b26c45029edb (diff)
Use do_alloca() instead of alloca()
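
For reference, the pattern every call site below is switched to: a do_alloca()/free_alloca() pair guarded by an ALLOCA_FLAG, so that small requests still land on the stack while oversized ones are assumed to fall back to the Zend heap allocator. A minimal sketch of a converted call site (the helper name example_scan() and the exact threshold/fallback behaviour are assumptions for illustration, not part of this diff):

#include "zend.h"  /* ALLOCA_FLAG(), do_alloca(), free_alloca() */

static void example_scan(size_t n)
{
	int *buf;
	ALLOCA_FLAG(use_heap);         /* remembers whether the heap fallback was taken */

	/* Stack allocation for small n; assumed to fall back to the heap for
	 * large n, with use_heap set so free_alloca() knows to release it. */
	buf = do_alloca(sizeof(int) * n, use_heap);

	/* ... fill and consume buf ... */

	free_alloca(buf, use_heap);    /* no-op for stack memory, frees heap memory otherwise */
}
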
-rw-r--r--   ext/opcache/Optimizer/zend_call_graph.c    5
-rw-r--r--   ext/opcache/Optimizer/zend_cfg.c           8
-rw-r--r--   ext/opcache/Optimizer/zend_inference.c    64
-rw-r--r--   ext/opcache/Optimizer/zend_ssa.c          80
-rw-r--r--   ext/opcache/Optimizer/zend_worklist.h     18
5 files changed, 122 insertions, 53 deletions
diff --git a/ext/opcache/Optimizer/zend_call_graph.c b/ext/opcache/Optimizer/zend_call_graph.c
index 86db61352d..2d163c5ae1 100644
--- a/ext/opcache/Optimizer/zend_call_graph.c
+++ b/ext/opcache/Optimizer/zend_call_graph.c
@@ -142,8 +142,10 @@ static int zend_analyze_calls(zend_arena **arena, zend_script *script, uint32_t
zend_function *func;
zend_call_info *call_info;
int call = 0;
- zend_call_info **call_stack = alloca((op_array->last / 2) * sizeof(zend_call_info*));
+ zend_call_info **call_stack;
+ ALLOCA_FLAG(use_heap);
+ call_stack = do_alloca((op_array->last / 2) * sizeof(zend_call_info*), use_heap);
while (opline != end) {
call_info = NULL;
switch (opline->opcode) {
@@ -199,6 +201,7 @@ static int zend_analyze_calls(zend_arena **arena, zend_script *script, uint32_t
}
opline++;
}
+ free_alloca(call_stack, use_heap);
return SUCCESS;
}
diff --git a/ext/opcache/Optimizer/zend_cfg.c b/ext/opcache/Optimizer/zend_cfg.c
index 7818836d44..c739931684 100644
--- a/ext/opcache/Optimizer/zend_cfg.c
+++ b/ext/opcache/Optimizer/zend_cfg.c
@@ -703,9 +703,11 @@ int zend_cfg_identify_loops(const zend_op_array *op_array, zend_cfg *cfg, uint32
int *dj_spanning_tree;
zend_worklist work;
int flag = ZEND_FUNC_NO_LOOPS;
+ ALLOCA_FLAG(list_use_heap);
+ ALLOCA_FLAG(tree_use_heap);
- ZEND_WORKLIST_ALLOCA(&work, cfg->blocks_count);
- dj_spanning_tree = alloca(sizeof(int) * cfg->blocks_count);
+ ZEND_WORKLIST_ALLOCA(&work, cfg->blocks_count, list_use_heap);
+ dj_spanning_tree = do_alloca(sizeof(int) * cfg->blocks_count, tree_use_heap);
for (i = 0; i < cfg->blocks_count; i++) {
dj_spanning_tree[i] = -1;
@@ -794,6 +796,8 @@ int zend_cfg_identify_loops(const zend_op_array *op_array, zend_cfg *cfg, uint32
}
}
+ free_alloca(dj_spanning_tree, tree_use_heap);
+ ZEND_WORKLIST_FREE_ALLOCA(&work, list_use_heap);
*flags |= flag;
return SUCCESS;
diff --git a/ext/opcache/Optimizer/zend_inference.c b/ext/opcache/Optimizer/zend_inference.c
index 25b7eb9275..4387571075 100644
--- a/ext/opcache/Optimizer/zend_inference.c
+++ b/ext/opcache/Optimizer/zend_inference.c
@@ -158,11 +158,14 @@ int zend_ssa_find_sccs(const zend_op_array *op_array, zend_ssa *ssa) /* {{{ */
int index = 0, *dfs, *root;
zend_worklist_stack stack;
int j;
+ ALLOCA_FLAG(dfs_use_heap);
+ ALLOCA_FLAG(root_use_heap);
+ ALLOCA_FLAG(stack_use_heap);
- dfs = alloca(sizeof(int) * ssa->vars_count);
+ dfs = do_alloca(sizeof(int) * ssa->vars_count, dfs_use_heap);
memset(dfs, -1, sizeof(int) * ssa->vars_count);
- root = alloca(sizeof(int) * ssa->vars_count);
- ZEND_WORKLIST_STACK_ALLOCA(&stack, ssa->vars_count);
+ root = do_alloca(sizeof(int) * ssa->vars_count, root_use_heap);
+ ZEND_WORKLIST_STACK_ALLOCA(&stack, ssa->vars_count, stack_use_heap);
/* Find SCCs */
for (j = 0; j < ssa->vars_count; j++) {
@@ -188,6 +191,10 @@ int zend_ssa_find_sccs(const zend_op_array *op_array, zend_ssa *ssa) /* {{{ */
}
}
+ ZEND_WORKLIST_STACK_FREE_ALLOCA(&stack, stack_use_heap);
+ free_alloca(root, root_use_heap);
+ free_alloca(dfs, dfs_use_heap);
+
return SUCCESS;
}
/* }}} */
@@ -200,11 +207,13 @@ int zend_ssa_find_false_dependencies(const zend_op_array *op_array, zend_ssa *ss
zend_bitset worklist;
int i, j, use;
zend_ssa_phi *p;
+ ALLOCA_FLAG(use_heap);
- if (!op_array->function_name || !ssa->vars || !ssa->ops)
+ if (!op_array->function_name || !ssa->vars || !ssa->ops) {
return SUCCESS;
+ }
- worklist = alloca(sizeof(zend_ulong) * zend_bitset_len(ssa_vars_count));
+ worklist = do_alloca(sizeof(zend_ulong) * zend_bitset_len(ssa_vars_count), use_heap);
memset(worklist, 0, sizeof(zend_ulong) * zend_bitset_len(ssa_vars_count));
for (i = 0; i < ssa_vars_count; i++) {
@@ -243,6 +252,8 @@ int zend_ssa_find_false_dependencies(const zend_op_array *op_array, zend_ssa *ss
}
}
+ free_alloca(worklist, use_heap);
+
return SUCCESS;
}
/* }}} */
@@ -1757,13 +1768,16 @@ static int zend_check_inner_cycles(const zend_op_array *op_array, zend_ssa *ssa,
static void zend_infer_ranges_warmup(const zend_op_array *op_array, zend_ssa *ssa, int *scc_var, int *next_scc_var, int scc)
{
int worklist_len = zend_bitset_len(ssa->vars_count);
- zend_bitset worklist = alloca(sizeof(zend_ulong) * worklist_len);
- zend_bitset visited = alloca(sizeof(zend_ulong) * worklist_len);
+ zend_bitset worklist;
+ zend_bitset visited;
int j, n;
zend_ssa_range tmp;
#ifdef NEG_RANGE
int has_inner_cycles = 0;
+ ALLOCA_FLAG(use_heap);
+ worklist = do_alloca(sizeof(zend_ulong) * worklist_len * 2, use_heap);
+ visited = worklist + worklist_len;
memset(worklist, 0, sizeof(zend_ulong) * worklist_len);
memset(visited, 0, sizeof(zend_ulong) * worklist_len);
j= scc_var[scc];
@@ -1860,17 +1874,26 @@ static void zend_infer_ranges_warmup(const zend_op_array *op_array, zend_ssa *ss
}
}
}
+ free_alloca(worklist, use_heap);
}
static int zend_infer_ranges(const zend_op_array *op_array, zend_ssa *ssa) /* {{{ */
{
int worklist_len = zend_bitset_len(ssa->vars_count);
- zend_bitset worklist = alloca(sizeof(zend_ulong) * worklist_len);
- int *next_scc_var = alloca(sizeof(int) * ssa->vars_count);
- int *scc_var = alloca(sizeof(int) * ssa->sccs);
+ zend_bitset worklist;
+ int *next_scc_var;
+ int *scc_var;
zend_ssa_phi *p;
zend_ssa_range tmp;
int scc, j;
+ ALLOCA_FLAG(use_heap);
+
+ worklist = do_alloca(
+ sizeof(zend_ulong) * worklist_len +
+ sizeof(int) * ssa->vars_count +
+ sizeof(int) * ssa->sccs, use_heap);
+ next_scc_var = (int*)(worklist + worklist_len);
+ scc_var = next_scc_var + ssa->vars_count;
#ifdef LOG_SSA_RANGE
fprintf(stderr, "Range Inference\n");
@@ -1949,6 +1972,8 @@ static int zend_infer_ranges(const zend_op_array *op_array, zend_ssa *ssa) /* {{
}
}
+ free_alloca(worklist, use_heap);
+
return SUCCESS;
}
/* }}} */
@@ -3655,10 +3680,11 @@ static int zend_type_narrowing(const zend_op_array *op_array, const zend_script
int ssa_vars_count = ssa->vars_count;
int j;
zend_bitset worklist, types;
+ ALLOCA_FLAG(use_heap);
- types = alloca(sizeof(zend_ulong) * op_array->last_var);
+ types = do_alloca(sizeof(zend_ulong) * (op_array->last_var + zend_bitset_len(ssa_vars_count)), use_heap);
memset(types, 0, sizeof(zend_ulong) * op_array->last_var);
- worklist = alloca(sizeof(zend_ulong) * zend_bitset_len(ssa_vars_count));
+ worklist = types + op_array->last_var;
memset(worklist, 0, sizeof(zend_ulong) * zend_bitset_len(ssa_vars_count));
/* Find variables that may be only LONG or DOUBLE */
@@ -3673,6 +3699,7 @@ static int zend_type_narrowing(const zend_op_array *op_array, const zend_script
}
}
if (zend_bitset_empty(worklist, zend_bitset_len(ssa_vars_count))) {
+ free_alloca(types, use_heap);
return SUCCESS;
}
@@ -3694,6 +3721,7 @@ static int zend_type_narrowing(const zend_op_array *op_array, const zend_script
}
}
if (zend_bitset_empty(worklist, zend_bitset_len(ssa_vars_count))) {
+ free_alloca(types, use_heap);
return SUCCESS;
}
@@ -3708,6 +3736,7 @@ static int zend_type_narrowing(const zend_op_array *op_array, const zend_script
}
}
if (zend_bitset_empty(worklist, zend_bitset_len(ssa_vars_count))) {
+ free_alloca(types, use_heap);
return SUCCESS;
}
@@ -3731,9 +3760,11 @@ static int zend_type_narrowing(const zend_op_array *op_array, const zend_script
}
if (zend_infer_types_ex(op_array, script, ssa, worklist) != SUCCESS) {
+ free_alloca(types, use_heap);
return FAILURE;
}
+ free_alloca(types, use_heap);
return SUCCESS;
}
@@ -3957,8 +3988,9 @@ static int zend_infer_types(const zend_op_array *op_array, const zend_script *sc
int ssa_vars_count = ssa->vars_count;
int j;
zend_bitset worklist;
+ ALLOCA_FLAG(use_heap);
- worklist = alloca(sizeof(zend_ulong) * zend_bitset_len(ssa_vars_count));
+ worklist = do_alloca(sizeof(zend_ulong) * zend_bitset_len(ssa_vars_count), use_heap);
memset(worklist, 0, sizeof(zend_ulong) * zend_bitset_len(ssa_vars_count));
/* Type Inference */
@@ -3968,6 +4000,7 @@ static int zend_infer_types(const zend_op_array *op_array, const zend_script *sc
}
if (zend_infer_types_ex(op_array, script, ssa, worklist) != SUCCESS) {
+ free_alloca(worklist, use_heap);
return FAILURE;
}
@@ -3978,6 +4011,7 @@ static int zend_infer_types(const zend_op_array *op_array, const zend_script *sc
zend_func_return_info(op_array, 1, 0, &ZEND_FUNC_INFO(op_array)->return_info);
}
+ free_alloca(worklist, use_heap);
return SUCCESS;
}
@@ -4026,12 +4060,13 @@ void zend_inference_check_recursive_dependencies(zend_op_array *op_array)
zend_call_info *call_info;
zend_bitset worklist;
int worklist_len;
+ ALLOCA_FLAG(use_heap);
if (!info->ssa.var_info || !(info->flags & ZEND_FUNC_RECURSIVE)) {
return;
}
worklist_len = zend_bitset_len(info->ssa.vars_count);
- worklist = alloca(sizeof(zend_ulong) * worklist_len);
+ worklist = do_alloca(sizeof(zend_ulong) * worklist_len, use_heap);
memset(worklist, 0, sizeof(zend_ulong) * worklist_len);
call_info = info->callee_info;
while (call_info) {
@@ -4049,6 +4084,7 @@ void zend_inference_check_recursive_dependencies(zend_op_array *op_array)
add_usages(op_array, &info->ssa, worklist, i);
}
}
+ free_alloca(worklist, use_heap);
}
/*
diff --git a/ext/opcache/Optimizer/zend_ssa.c b/ext/opcache/Optimizer/zend_ssa.c
index 5cdbd283ec..24b2eb1ed5 100644
--- a/ext/opcache/Optimizer/zend_ssa.c
+++ b/ext/opcache/Optimizer/zend_ssa.c
@@ -82,10 +82,11 @@ static int zend_ssa_rename(const zend_op_array *op_array, zend_ssa *ssa, int *va
uint32_t k;
zend_op *opline;
int *tmp = NULL;
+ ALLOCA_FLAG(use_heap);
// FIXME: Can we optimize this copying out in some cases?
if (blocks[n].next_child >= 0) {
- tmp = alloca(sizeof(int) * (op_array->last_var + op_array->T));
+ tmp = do_alloca(sizeof(int) * (op_array->last_var + op_array->T), use_heap);
memcpy(tmp, var, sizeof(int) * (op_array->last_var + op_array->T));
var = tmp;
}
@@ -365,6 +366,10 @@ static int zend_ssa_rename(const zend_op_array *op_array, zend_ssa *ssa, int *va
j = blocks[j].next_child;
}
+ if (tmp) {
+ free_alloca(tmp, use_heap);
+ }
+
return SUCCESS;
}
/* }}} */
@@ -379,6 +384,8 @@ int zend_build_ssa(zend_arena **arena, const zend_op_array *op_array, uint32_t b
int *var = NULL;
int i, j, k, changed;
zend_dfg dfg;
+ ALLOCA_FLAG(dfg_use_heap);
+ ALLOCA_FLAG(var_use_heap);
ssa->rt_constants = (build_flags & ZEND_RT_CONSTANTS);
ssa_blocks = zend_arena_calloc(arena, blocks_count, sizeof(zend_ssa_block));
@@ -390,7 +397,7 @@ int zend_build_ssa(zend_arena **arena, const zend_op_array *op_array, uint32_t b
/* Compute Variable Liveness */
dfg.vars = op_array->last_var + op_array->T;
dfg.size = set_size = zend_bitset_len(dfg.vars);
- dfg.tmp = alloca((set_size * sizeof(zend_ulong)) * (blocks_count * 5 + 1));
+ dfg.tmp = do_alloca((set_size * sizeof(zend_ulong)) * (blocks_count * 5 + 1), dfg_use_heap);
memset(dfg.tmp, 0, (set_size * sizeof(zend_ulong)) * (blocks_count * 5 + 1));
dfg.gen = dfg.tmp + set_size;
dfg.def = dfg.gen + set_size * blocks_count;
@@ -399,6 +406,7 @@ int zend_build_ssa(zend_arena **arena, const zend_op_array *op_array, uint32_t b
dfg.out = dfg.in + set_size * blocks_count;
if (zend_build_dfg(op_array, &ssa->cfg, &dfg) != SUCCESS) {
+ free_alloca(dfg.tmp, dfg_use_heap);
return FAILURE;
}
@@ -435,8 +443,9 @@ int zend_build_ssa(zend_arena **arena, const zend_op_array *op_array, uint32_t b
} while (changed);
/* SSA construction, Step 2: Phi placement based on Dominance Frontiers */
- var = alloca(sizeof(int) * (op_array->last_var + op_array->T));
+ var = do_alloca(sizeof(int) * (op_array->last_var + op_array->T), var_use_heap);
if (!var) {
+ free_alloca(dfg.tmp, dfg_use_heap);
return FAILURE;
}
zend_bitset_clear(tmp, set_size);
@@ -472,8 +481,9 @@ int zend_build_ssa(zend_arena **arena, const zend_op_array *op_array, uint32_t b
sizeof(int) * blocks[j].predecessors_count +
sizeof(void*) * blocks[j].predecessors_count);
- if (!phi)
- return FAILURE;
+ if (!phi) {
+ goto failure;
+ }
phi->sources = (int*)(((char*)phi) + sizeof(zend_ssa_phi));
memset(phi->sources, 0xff, sizeof(int) * blocks[j].predecessors_count);
phi->use_chains = (zend_ssa_phi**)(((char*)phi->sources) + sizeof(int) * ssa->cfg.blocks[j].predecessors_count);
@@ -668,34 +678,34 @@ int zend_build_ssa(zend_arena **arena, const zend_op_array *op_array, uint32_t b
if (var1 >= 0) {
if ((opline-1)->opcode == ZEND_IS_EQUAL) {
if (add_pi(arena, op_array, &dfg, ssa, j, bt, var1, var2, var2, val2, val2, 0, 0, 0) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
if (add_pi(arena, op_array, &dfg, ssa, j, bf, var1, var2, var2, val2, val2, 0, 0, 1) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
} else if ((opline-1)->opcode == ZEND_IS_NOT_EQUAL) {
if (add_pi(arena, op_array, &dfg, ssa, j, bf, var1, var2, var2, val2, val2, 0, 0, 0) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
if (add_pi(arena, op_array, &dfg, ssa, j, bt, var1, var2, var2, val2, val2, 0, 0, 1) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
} else if ((opline-1)->opcode == ZEND_IS_SMALLER) {
if (val2 > LONG_MIN) {
if (add_pi(arena, op_array, &dfg, ssa, j, bt, var1, -1, var2, LONG_MIN, val2-1, 1, 0, 0) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
}
if (add_pi(arena, op_array, &dfg, ssa, j, bf, var1, var2, -1, val2, LONG_MAX, 0, 1, 0) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
} else if ((opline-1)->opcode == ZEND_IS_SMALLER_OR_EQUAL) {
if (add_pi(arena, op_array, &dfg, ssa, j, bt, var1, -1, var2, LONG_MIN, val2, 1, 0, 0) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
if (val2 < LONG_MAX) {
if (add_pi(arena, op_array, &dfg, ssa, j, bf, var1, var2, -1, val2+1, LONG_MAX, 0, 1, 0) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
}
}
@@ -703,33 +713,34 @@ int zend_build_ssa(zend_arena **arena, const zend_op_array *op_array, uint32_t b
if (var2 >= 0) {
if((opline-1)->opcode == ZEND_IS_EQUAL) {
if (add_pi(arena, op_array, &dfg, ssa, j, bt, var2, var1, var1, val1, val1, 0, 0, 0) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
if (add_pi(arena, op_array, &dfg, ssa, j, bf, var2, var1, var1, val1, val1, 0, 0, 1) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
} else if ((opline-1)->opcode == ZEND_IS_NOT_EQUAL) {
if (add_pi(arena, op_array, &dfg, ssa, j, bf, var2, var1, var1, val1, val1, 0, 0, 0) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
if (add_pi(arena, op_array, &dfg, ssa, j, bt, var2, var1, var1, val1, val1, 0, 0, 1) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
} else if ((opline-1)->opcode == ZEND_IS_SMALLER) {
if (val1 < LONG_MAX) {
if (add_pi(arena, op_array, &dfg, ssa, j, bt, var2, var1, -1, val1+1, LONG_MAX, 0, 1, 0) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
}
if (add_pi(arena, op_array, &dfg, ssa, j, bf, var2, -1, var1, LONG_MIN, val1, 1, 0, 0) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
} else if ((opline-1)->opcode == ZEND_IS_SMALLER_OR_EQUAL) {
if (add_pi(arena, op_array, &dfg, ssa, j, bt, var2, var1, -1, val1, LONG_MAX, 0 ,1, 0) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
if (val1 > LONG_MIN) {
- if (add_pi(arena, op_array, &dfg, ssa, j, bf, var2, -1, var1, LONG_MIN, val1-1, 1, 0, 0) != SUCCESS) { return FAILURE;
+ if (add_pi(arena, op_array, &dfg, ssa, j, bf, var2, -1, var1, LONG_MIN, val1-1, 1, 0, 0) != SUCCESS) {
+ goto failure;
}
}
}
@@ -743,17 +754,17 @@ int zend_build_ssa(zend_arena **arena, const zend_op_array *op_array, uint32_t b
if ((opline-1)->opcode == ZEND_POST_DEC) {
if (add_pi(arena, op_array, &dfg, ssa, j, bf, var, -1, -1, -1, -1, 0, 0, 0) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
if (add_pi(arena, op_array, &dfg, ssa, j, bt, var, -1, -1, -1, -1, 0, 0, 1) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
} else if ((opline-1)->opcode == ZEND_POST_INC) {
if (add_pi(arena, op_array, &dfg, ssa, j, bf, var, -1, -1, 1, 1, 0, 0, 0) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
if (add_pi(arena, op_array, &dfg, ssa, j, bt, var, -1, -1, 1, 1, 0, 0, 1) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
}
} else if (opline->op1_type == IS_VAR &&
@@ -765,19 +776,19 @@ int zend_build_ssa(zend_arena **arena, const zend_op_array *op_array, uint32_t b
if ((opline-1)->opcode == ZEND_PRE_DEC) {
if (add_pi(arena, op_array, &dfg, ssa, j, bf, var, -1, -1, 0, 0, 0, 0, 0) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
/* speculative */
if (add_pi(arena, op_array, &dfg, ssa, j, bt, var, -1, -1, 0, 0, 0, 0, 1) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
} else if ((opline-1)->opcode == ZEND_PRE_INC) {
if (add_pi(arena, op_array, &dfg, ssa, j, bf, var, -1, -1, 0, 0, 0, 0, 0) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
/* speculative */
if (add_pi(arena, op_array, &dfg, ssa, j, bt, var, -1, -1, 0, 0, 0, 0, 1) != SUCCESS) {
- return FAILURE;
+ goto failure;
}
}
}
@@ -836,8 +847,9 @@ int zend_build_ssa(zend_arena **arena, const zend_op_array *op_array, uint32_t b
sizeof(int) * blocks[j].predecessors_count +
sizeof(void*) * blocks[j].predecessors_count);
- if (!phi)
- return FAILURE;
+ if (!phi) {
+ goto failure;
+ }
phi->sources = (int*)(((char*)phi) + sizeof(zend_ssa_phi));
memset(phi->sources, 0xff, sizeof(int) * blocks[j].predecessors_count);
phi->use_chains = (zend_ssa_phi**)(((char*)phi->sources) + sizeof(int) * ssa->cfg.blocks[j].predecessors_count);
@@ -868,9 +880,15 @@ int zend_build_ssa(zend_arena **arena, const zend_op_array *op_array, uint32_t b
}
ssa->vars_count = op_array->last_var;
if (zend_ssa_rename(op_array, ssa, var, 0) != SUCCESS) {
+failure:
+ free_alloca(var, var_use_heap);
+ free_alloca(dfg.tmp, dfg_use_heap);
return FAILURE;
}
+ free_alloca(var, var_use_heap);
+ free_alloca(dfg.tmp, dfg_use_heap);
+
return SUCCESS;
}
/* }}} */
diff --git a/ext/opcache/Optimizer/zend_worklist.h b/ext/opcache/Optimizer/zend_worklist.h
index c48e039922..2ba0e1c632 100644
--- a/ext/opcache/Optimizer/zend_worklist.h
+++ b/ext/opcache/Optimizer/zend_worklist.h
@@ -30,12 +30,15 @@ typedef struct _zend_worklist_stack {
int capacity;
} zend_worklist_stack;
-#define ZEND_WORKLIST_STACK_ALLOCA(s, _len) do { \
- (s)->buf = (int*)alloca(sizeof(int) * _len); \
+#define ZEND_WORKLIST_STACK_ALLOCA(s, _len, use_heap) do { \
+ (s)->buf = (int*)do_alloca(sizeof(int) * _len, use_heap); \
(s)->len = 0; \
(s)->capacity = _len; \
} while (0)
+#define ZEND_WORKLIST_STACK_FREE_ALLOCA(s, use_heap) \
+ free_alloca((s)->buf, use_heap)
+
static inline int zend_worklist_stack_prepare(zend_arena **arena, zend_worklist_stack *stack, int len)
{
ZEND_ASSERT(len >= 0);
@@ -73,12 +76,17 @@ typedef struct _zend_worklist {
zend_worklist_stack stack;
} zend_worklist;
-#define ZEND_WORKLIST_ALLOCA(w, _len) do { \
- (w)->visited = (zend_bitset)alloca(sizeof(zend_ulong) * zend_bitset_len(_len)); \
+#define ZEND_WORKLIST_ALLOCA(w, _len, use_heap) do { \
+ (w)->stack.buf = (int*)do_alloca(sizeof(int) * _len + sizeof(zend_ulong) * zend_bitset_len(_len), use_heap); \
+ (w)->stack.len = 0; \
+ (w)->stack.capacity = _len; \
+ (w)->visited = (zend_bitset)((w)->stack.buf + _len); \
memset((w)->visited, 0, sizeof(zend_ulong) * zend_bitset_len(_len)); \
- ZEND_WORKLIST_STACK_ALLOCA(&(w)->stack, _len); \
} while (0)
+#define ZEND_WORKLIST_FREE_ALLOCA(w, use_heap) \
+ free_alloca((w)->stack.buf, use_heap)
+
static inline int zend_worklist_prepare(zend_arena **arena, zend_worklist *worklist, int len)
{
ZEND_ASSERT(len >= 0);
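
The reworked worklist macros now carve the stack buffer and the visited bitset out of a single do_alloca() block, so a caller passes one flag per worklist and releases it with ZEND_WORKLIST_FREE_ALLOCA. A minimal caller sketch mirroring the zend_cfg.c hunk above (the traversal body is elided; this is only an illustration of the allocate/free pairing, not code from the commit):

static void example_traversal(const zend_cfg *cfg)
{
	zend_worklist work;
	ALLOCA_FLAG(list_use_heap);

	ZEND_WORKLIST_ALLOCA(&work, cfg->blocks_count, list_use_heap);
	/* ... push basic blocks, pop and visit them ... */
	ZEND_WORKLIST_FREE_ALLOCA(&work, list_use_heap);
}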