Diffstat (limited to 'gcc/tree-scalar-evolution.c')
-rw-r--r-- | gcc/tree-scalar-evolution.c | 183
1 files changed, 183 insertions, 0 deletions
diff --git a/gcc/tree-scalar-evolution.c b/gcc/tree-scalar-evolution.c
index 52b0ba38b67..13cbe42b55f 100644
--- a/gcc/tree-scalar-evolution.c
+++ b/gcc/tree-scalar-evolution.c
@@ -1718,6 +1718,184 @@ compute_scalar_evolution_in_loop (struct loop *wrto_loop,
   return analyze_scalar_evolution_1 (wrto_loop, res, chrec_not_analyzed_yet);
 }
 
+/* Folds EXPR, if it is a cast to pointer, assuming that the created
+   polynomial_chrec does not wrap.  */
+
+static tree
+fold_used_pointer_cast (tree expr)
+{
+  tree op;
+  tree type, inner_type;
+
+  if (TREE_CODE (expr) != NOP_EXPR && TREE_CODE (expr) != CONVERT_EXPR)
+    return expr;
+
+  op = TREE_OPERAND (expr, 0);
+  if (TREE_CODE (op) != POLYNOMIAL_CHREC)
+    return expr;
+
+  type = TREE_TYPE (expr);
+  inner_type = TREE_TYPE (op);
+
+  if (!INTEGRAL_TYPE_P (inner_type)
+      || TYPE_PRECISION (inner_type) != TYPE_PRECISION (type))
+    return expr;
+
+  return build_polynomial_chrec (CHREC_VARIABLE (op),
+				 chrec_convert (type, CHREC_LEFT (op), NULL_TREE),
+				 chrec_convert (type, CHREC_RIGHT (op), NULL_TREE));
+}
+
+/* Returns true if EXPR is an expression corresponding to offset of pointer
+   in p + offset.  */
+
+static bool
+pointer_offset_p (tree expr)
+{
+  if (TREE_CODE (expr) == INTEGER_CST)
+    return true;
+
+  if ((TREE_CODE (expr) == NOP_EXPR || TREE_CODE (expr) == CONVERT_EXPR)
+      && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0))))
+    return true;
+
+  return false;
+}
+
+/* EXPR is a scalar evolution of a pointer that is dereferenced or used in
+   comparison.  This means that it must point to a part of some object in
+   memory, which enables us to argue about overflows and possibly simplify
+   the EXPR.  Returns the simplified value.
+
+   Currently, for
+
+     int i, n;
+     int *p;
+
+     for (i = -n; i < n; i++)
+       *(p + i) = ...;
+
+   We generate the following code (assuming that size of int and size_t is
+   4 bytes):
+
+     for (i = -n; i < n; i++)
+       {
+         size_t tmp1, tmp2;
+         int *tmp3, *tmp4;
+
+         tmp1 = (size_t) i;      (1)
+         tmp2 = 4 * tmp1;        (2)
+         tmp3 = (int *) tmp2;    (3)
+         tmp4 = p + tmp3;        (4)
+
+         *tmp4 = ...;
+       }
+
+   We in general assume that pointer arithmetics does not overflow (since its
+   behavior is undefined in that case).  One of the problems is that our
+   translation does not capture this property very well -- (int *) is
+   considered unsigned, hence the computation in (4) does overflow if i is
+   negative.
+
+   This impreciseness creates complications in scev analysis.  The scalar
+   evolution of i is [-n, +, 1].  Since int and size_t have the same precision
+   (in this example), and size_t is unsigned (so we do not care about
+   overflows), we succeed to derive that scev of tmp1 is [(size_t) -n, +, 1]
+   and scev of tmp2 is [4 * (size_t) -n, +, 4].  With tmp3, we run into
+   problem -- [(int *) (4 * (size_t) -n), +, 4] wraps, and since we on several
+   places assume that this is not the case for scevs with pointer type, we
+   cannot use this scev for tmp3; hence, its scev is
+   (int *) [(4 * (size_t) -n), +, 4], and scev of tmp4 is
+   p + (int *) [(4 * (size_t) -n), +, 4].  Most of the optimizers are unable to
+   work with scevs of this shape.
+
+   However, since tmp4 is dereferenced, all its values must belong to a single
+   object, and taking into account that the precision of int * and size_t is
+   the same, it is impossible for its scev to wrap.  Hence, we can derive that
+   its evolution is [p + (int *) (4 * (size_t) -n), +, 4], which the optimizers
+   can work with.
+
+   ??? Maybe we should use different representation for pointer arithmetics,
+   however that is a long-term project with a lot of potential for creating
+   bugs.  */
+
+static tree
+fold_used_pointer (tree expr)
+{
+  tree op0, op1, new0, new1;
+  enum tree_code code = TREE_CODE (expr);
+
+  if (code == PLUS_EXPR
+      || code == MINUS_EXPR)
+    {
+      op0 = TREE_OPERAND (expr, 0);
+      op1 = TREE_OPERAND (expr, 1);
+
+      if (pointer_offset_p (op1))
+	{
+	  new0 = fold_used_pointer (op0);
+	  new1 = fold_used_pointer_cast (op1);
+	}
+      else if (code == PLUS_EXPR && pointer_offset_p (op0))
+	{
+	  new0 = fold_used_pointer_cast (op0);
+	  new1 = fold_used_pointer (op1);
+	}
+      else
+	return expr;
+
+      if (new0 == op0 && new1 == op1)
+	return expr;
+
+      if (code == PLUS_EXPR)
+	expr = chrec_fold_plus (TREE_TYPE (expr), new0, new1);
+      else
+	expr = chrec_fold_minus (TREE_TYPE (expr), new0, new1);
+
+      return expr;
+    }
+  else
+    return fold_used_pointer_cast (expr);
+}
+
+/* Returns true if PTR is dereferenced, or used in comparison.  */
+
+static bool
+pointer_used_p (tree ptr)
+{
+  use_operand_p use_p;
+  imm_use_iterator imm_iter;
+  tree stmt, rhs;
+  struct ptr_info_def *pi = get_ptr_info (ptr);
+  var_ann_t v_ann = var_ann (SSA_NAME_VAR (ptr));
+
+  /* Check whether the pointer has a memory tag; if it does, it is
+     (or at least used to be) dereferenced.  */
+  if ((pi != NULL && pi->name_mem_tag != NULL)
+      || v_ann->symbol_mem_tag)
+    return true;
+
+  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, ptr)
+    {
+      stmt = USE_STMT (use_p);
+      if (TREE_CODE (stmt) == COND_EXPR)
+	return true;
+
+      if (TREE_CODE (stmt) != MODIFY_EXPR)
+	continue;
+
+      rhs = TREE_OPERAND (stmt, 1);
+      if (!COMPARISON_CLASS_P (rhs))
+	continue;
+
+      if (TREE_OPERAND (stmt, 0) == ptr
+	  || TREE_OPERAND (stmt, 1) == ptr)
+	return true;
+    }
+
+  return false;
+}
+
 /* Helper recursive function.  */
 
 static tree
@@ -1766,6 +1944,11 @@ analyze_scalar_evolution_1 (struct loop *loop, tree var, tree res)
     {
     case MODIFY_EXPR:
       res = interpret_rhs_modify_expr (loop, def, TREE_OPERAND (def, 1), type);
+
+      if (POINTER_TYPE_P (type)
+	  && !automatically_generated_chrec_p (res)
+	  && pointer_used_p (var))
+	res = fold_used_pointer (res);
       break;
 
     case PHI_NODE:
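
A quick way to see the wrap-around that the comment in the patch describes is to redo steps (1)-(4) by hand. The standalone C sketch below is not part of the patch; buf, p, n and the tmp* names are only illustrative, and since C itself cannot add an integer-typed "pointer" to another pointer the way the GIMPLE in step (4) does, the byte-level addition is mimicked with uintptr_t. On a typical 64-bit target the intermediate offset wraps for every negative i, yet each resulting pointer still lands inside the single object buf, which is exactly the property fold_used_pointer relies on.

#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  int buf[16] = { 0 };
  int *p = buf + 8;        /* p points into the middle of buf.  */
  int n = 8;
  int i;

  for (i = -n; i < n; i++)
    {
      size_t tmp1 = (size_t) i;            /* (1) wraps for i < 0  */
      size_t tmp2 = sizeof (int) * tmp1;   /* (2) huge unsigned value  */
      uintptr_t tmp3 = (uintptr_t) tmp2;   /* (3) */
      int *tmp4 = (int *) ((uintptr_t) p + tmp3);  /* (4) wraps back into buf  */

      *tmp4 = i;           /* Every store stays within buf[0..15].  */
    }

  /* Prints "-8 7": the loop walked buf from its first to its last element.  */
  printf ("%d %d\n", buf[0], buf[15]);
  return 0;
}

Even though (1)-(4) individually overflow, the sequence of addresses stored through tmp4 is the simple progression the patch derives as [p + (int *) (4 * (size_t) -n), +, 4], because a dereferenced pointer may only walk within one object.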