author     steven <steven@138bc75d-0d04-0410-961f-82ee72b054a4>    2005-03-23 11:09:24 +0000
committer  steven <steven@138bc75d-0d04-0410-961f-82ee72b054a4>    2005-03-23 11:09:24 +0000
commit     318c9b27872d5dfa736fdb68456a3b0526aadaca (patch)
tree       4fd68cf88db134610d6dfe0cb8b84a5c634ca886 /gcc/fortran
parent     b9fba50abc3d6dc4178b1140cb9ecd6d313ffab2 (diff)
* convert.c (convert): Replace fold (buildN (...)) with fold_buildN.
* trans-array.c (gfc_trans_allocate_array_storage,
gfc_trans_allocate_temp_array, gfc_trans_array_constructor_value,
gfc_conv_array_index_ref, gfc_trans_array_bound_check,
gfc_conv_array_index_offset, gfc_conv_scalarized_array_ref,
gfc_conv_array_ref, gfc_trans_preloop_setup, gfc_conv_ss_startstride,
gfc_conv_loop_setup, gfc_array_init_size, gfc_trans_array_bounds,
gfc_trans_auto_array_allocation, gfc_trans_dummy_array_bias,
gfc_conv_expr_descriptor): Likewise.
* trans-expr.c (gfc_conv_powi, gfc_conv_string_tmp,
gfc_conv_concat_op, gfc_conv_expr_op): Likewise.
* trans-intrinsic.c (build_round_expr, gfc_conv_intrinsic_bound,
gfc_conv_intrinsic_cmplx, gfc_conv_intrinsic_sign,
gfc_conv_intrinsic_minmaxloc, gfc_conv_intrinsic_minmaxval,
gfc_conv_intrinsic_btest, gfc_conv_intrinsic_bitop,
gfc_conv_intrinsic_singlebitop, gfc_conv_intrinsic_ibits,
gfc_conv_intrinsic_ishft, gfc_conv_intrinsic_ishftc,
gfc_conv_intrinsic_merge, prepare_arg_info,
gfc_conv_intrinsic_rrspacing, gfc_conv_intrinsic_repeat): Likewise.
* trans-stmt.c (gfc_trans_simple_do, gfc_trans_do, gfc_trans_do_while,
gfc_trans_forall_loop, gfc_do_allocate, generate_loop_for_temp_to_lhs,
generate_loop_for_rhs_to_temp, compute_inner_temp_size,
allocate_temp_for_forall_nest, gfc_trans_pointer_assign_need_temp,
gfc_trans_forall_1, gfc_evaluate_where_mask, gfc_trans_where_assign):
Likewise.
* trans-types.c (gfc_get_dtype, gfc_get_array_type_bounds): Likewise.
* trans.c (gfc_add_modify_expr): Likewise.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@96926 138bc75d-0d04-0410-961f-82ee72b054a4
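For reference, the mechanical transformation applied throughout this patch is sketched below. This is an illustrative example only; the operands are placeholders for a typical call site, not a quote of any single hunk:

    /* Before: build a tree node, then fold it in a separate call.  */
    tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type, index, offset));

    /* After: fold_build2 builds and folds the node in a single call.  */
    tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, offset);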
Diffstat (limited to 'gcc/fortran')
-rw-r--r--  gcc/fortran/ChangeLog           30
-rw-r--r--  gcc/fortran/convert.c            6
-rw-r--r--  gcc/fortran/trans-array.c      196
-rw-r--r--  gcc/fortran/trans-expr.c        15
-rw-r--r--  gcc/fortran/trans-intrinsic.c  128
-rw-r--r--  gcc/fortran/trans-stmt.c       116
-rw-r--r--  gcc/fortran/trans-types.c       12
-rw-r--r--  gcc/fortran/trans.c              2
8 files changed, 267 insertions, 238 deletions
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index 7ab43e46719..aa990eb616a 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,33 @@
+2005-03-23 Steven Bosscher <stevenb@suse.de>
+
+ * convert.c (convert): Replace fold (buildN (...)) with fold_buildN.
+ * trans-array.c (gfc_trans_allocate_array_storage,
+ gfc_trans_allocate_temp_array gfc_trans_array_constructor_value,
+ gfc_conv_array_index_ref, gfc_trans_array_bound_check,
+ gfc_conv_array_index_offset, gfc_conv_scalarized_array_ref,
+ gfc_conv_array_ref, gfc_trans_preloop_setup, gfc_conv_ss_startstride,
+ gfc_conv_loop_setup, gfc_array_init_size, gfc_trans_array_bounds,
+ gfc_trans_auto_array_allocation, gfc_trans_dummy_array_bias,
+ gfc_conv_expr_descriptor): Likewise.
+ * trans-expr.c (gfc_conv_powi, gfc_conv_string_tmp,
+ gfc_conv_concat_op, gfc_conv_expr_op): Likewise.
+ * trans-intrinsic.c (build_round_expr, gfc_conv_intrinsic_bound,
+ gfc_conv_intrinsic_cmplx, gfc_conv_intrinsic_sign,
+ gfc_conv_intrinsic_minmaxloc, gfc_conv_intrinsic_minmaxval,
+ gfc_conv_intrinsic_btest, gfc_conv_intrinsic_bitop,
+ gfc_conv_intrinsic_singlebitop, gfc_conv_intrinsic_ibits,
+ gfc_conv_intrinsic_ishft, gfc_conv_intrinsic_ishftc,
+ gfc_conv_intrinsic_merge, prepare_arg_info,
+ gfc_conv_intrinsic_rrspacing, gfc_conv_intrinsic_repeat): Likewise.
+ * trans-stmt.c (gfc_trans_simple_do, gfc_trans_do, gfc_trans_do_while,
+ gfc_trans_forall_loop, gfc_do_allocate, generate_loop_for_temp_to_lhs,
+ generate_loop_for_rhs_to_temp, compute_inner_temp_size,
+ allocate_temp_for_forall_nest, gfc_trans_pointer_assign_need_temp,
+ gfc_trans_forall_1, gfc_evaluate_where_mask, gfc_trans_where_assign):
+ Likewise.
+ * trans-types.c (gfc_get_dtype, gfc_get_array_type_bounds): Likewise.
+ * trans.c (gfc_add_modify_expr): Likewise.
+
2005-03-22 Francois-Xavier Coudert <coudert@clipper.ens.fr>
* check.c (gfc_check_chdir, gfc_check_chdir_sub, gfc_check_kill,
diff --git a/gcc/fortran/convert.c b/gcc/fortran/convert.c
index 9759f057f50..a0298f22784 100644
--- a/gcc/fortran/convert.c
+++ b/gcc/fortran/convert.c
@@ -81,7 +81,7 @@ convert (tree type, tree expr)
return expr;
if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (expr)))
- return fold (build1 (NOP_EXPR, type, expr));
+ return fold_build1 (NOP_EXPR, type, expr);
if (TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK)
return error_mark_node;
if (TREE_CODE (TREE_TYPE (expr)) == VOID_TYPE)
@@ -106,9 +106,9 @@ convert (tree type, tree expr)
/* If we have a NOP_EXPR, we must fold it here to avoid
infinite recursion between fold () and convert (). */
if (TREE_CODE (e) == NOP_EXPR)
- return fold (build1 (NOP_EXPR, type, TREE_OPERAND (e, 0)));
+ return fold_build1 (NOP_EXPR, type, TREE_OPERAND (e, 0));
else
- return fold (build1 (NOP_EXPR, type, e));
+ return fold_build1 (NOP_EXPR, type, e);
}
if (code == POINTER_TYPE || code == REFERENCE_TYPE)
return fold (convert_to_pointer (type, e));
diff --git a/gcc/fortran/trans-array.c b/gcc/fortran/trans-array.c
index b53f8629fd1..2d0bff8c070 100644
--- a/gcc/fortran/trans-array.c
+++ b/gcc/fortran/trans-array.c
@@ -429,8 +429,8 @@ gfc_trans_allocate_array_storage (gfc_loopinfo * loop, gfc_ss_info * info,
if (onstack)
{
/* Make a temporary variable to hold the data. */
- tmp = fold (build2 (MINUS_EXPR, TREE_TYPE (nelem), nelem,
- integer_one_node));
+ tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (nelem), nelem,
+ integer_one_node);
tmp = build_range_type (gfc_array_index_type, gfc_index_zero_node,
tmp);
tmp = build_array_type (gfc_get_element_type (TREE_TYPE (desc)),
@@ -508,8 +508,8 @@ gfc_trans_allocate_temp_array (gfc_loopinfo * loop, gfc_ss_info * info,
{
/* Callee allocated arrays may not have a known bound yet. */
if (loop->to[n])
- loop->to[n] = fold (build2 (MINUS_EXPR, gfc_array_index_type,
- loop->to[n], loop->from[n]));
+ loop->to[n] = fold_build2 (MINUS_EXPR, gfc_array_index_type,
+ loop->to[n], loop->from[n]);
loop->from[n] = gfc_index_zero_node;
}
@@ -569,18 +569,18 @@ gfc_trans_allocate_temp_array (gfc_loopinfo * loop, gfc_ss_info * info,
tmp = gfc_conv_descriptor_ubound (desc, gfc_rank_cst[n]);
gfc_add_modify_expr (&loop->pre, tmp, loop->to[n]);
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- loop->to[n], gfc_index_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ loop->to[n], gfc_index_one_node);
- size = fold (build2 (MULT_EXPR, gfc_array_index_type, size, tmp));
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
size = gfc_evaluate_now (size, &loop->pre);
}
/* Get the size of the array. */
nelem = size;
if (size)
- size = fold (build2 (MULT_EXPR, gfc_array_index_type, size,
- TYPE_SIZE_UNIT (gfc_get_element_type (type))));
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, size,
+ TYPE_SIZE_UNIT (gfc_get_element_type (type)));
gfc_trans_allocate_array_storage (loop, info, size, nelem);
@@ -765,8 +765,8 @@ gfc_trans_array_constructor_value (stmtblock_t * pblock, tree type,
gfc_trans_array_ctor_element (&body, pointer, *poffset, &se,
c->expr);
- *poffset = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- *poffset, gfc_index_one_node));
+ *poffset = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ *poffset, gfc_index_one_node);
}
else
{
@@ -832,8 +832,8 @@ gfc_trans_array_constructor_value (stmtblock_t * pblock, tree type,
tmp);
gfc_add_expr_to_block (&body, tmp);
- *poffset = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- *poffset, bound));
+ *poffset = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ *poffset, bound);
}
if (!INTEGER_CST_P (*poffset))
{
@@ -1399,9 +1399,9 @@ gfc_conv_array_index_ref (gfc_se * se, tree pointer, tree * indices,
{
/* index = index + stride[n]*indices[n] */
tmp = gfc_conv_array_stride (se->expr, n);
- tmp = fold (build2 (MULT_EXPR, gfc_array_index_type, indices[n], tmp));
+ tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, indices[n], tmp);
- index = fold (build2 (PLUS_EXPR, gfc_array_index_type, index, tmp));
+ index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, tmp);
}
/* Result = data[index]. */
@@ -1429,11 +1429,11 @@ gfc_trans_array_bound_check (gfc_se * se, tree descriptor, tree index, int n)
index = gfc_evaluate_now (index, &se->pre);
/* Check lower bound. */
tmp = gfc_conv_array_lbound (descriptor, n);
- fault = fold (build2 (LT_EXPR, boolean_type_node, index, tmp));
+ fault = fold_build2 (LT_EXPR, boolean_type_node, index, tmp);
/* Check upper bound. */
tmp = gfc_conv_array_ubound (descriptor, n);
- cond = fold (build2 (GT_EXPR, boolean_type_node, index, tmp));
- fault = fold (build2 (TRUTH_OR_EXPR, boolean_type_node, fault, cond));
+ cond = fold_build2 (GT_EXPR, boolean_type_node, index, tmp);
+ fault = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, fault, cond);
gfc_trans_runtime_check (fault, gfc_strconst_fault, &se->pre);
@@ -1528,10 +1528,10 @@ gfc_conv_array_index_offset (gfc_se * se, gfc_ss_info * info, int dim, int i,
/* Multiply the loop variable by the stride and delta. */
index = se->loop->loopvar[i];
- index = fold (build2 (MULT_EXPR, gfc_array_index_type, index,
- info->stride[i]));
- index = fold (build2 (PLUS_EXPR, gfc_array_index_type, index,
- info->delta[i]));
+ index = fold_build2 (MULT_EXPR, gfc_array_index_type, index,
+ info->stride[i]);
+ index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index,
+ info->delta[i]);
if (ar->dimen_type[dim] == DIMEN_VECTOR)
{
@@ -1552,12 +1552,12 @@ gfc_conv_array_index_offset (gfc_se * se, gfc_ss_info * info, int dim, int i,
gcc_assert (se->loop);
index = se->loop->loopvar[se->loop->order[i]];
if (!integer_zerop (info->delta[i]))
- index = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- index, info->delta[i]));
+ index = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ index, info->delta[i]);
}
/* Multiply by the stride. */
- index = fold (build2 (MULT_EXPR, gfc_array_index_type, index, stride));
+ index = fold_build2 (MULT_EXPR, gfc_array_index_type, index, stride);
return index;
}
@@ -1583,7 +1583,7 @@ gfc_conv_scalarized_array_ref (gfc_se * se, gfc_array_ref * ar)
info->stride0);
/* Add the offset for this dimension to the stored offset for all other
dimensions. */
- index = fold (build2 (PLUS_EXPR, gfc_array_index_type, index, info->offset));
+ index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, info->offset);
tmp = gfc_build_indirect_ref (info->data);
se->expr = gfc_build_array_ref (tmp, index);
@@ -1643,25 +1643,25 @@ gfc_conv_array_ref (gfc_se * se, gfc_array_ref * ar)
indexse.expr = gfc_evaluate_now (indexse.expr, &se->pre);
tmp = gfc_conv_array_lbound (se->expr, n);
- cond = fold (build2 (LT_EXPR, boolean_type_node,
- indexse.expr, tmp));
+ cond = fold_build2 (LT_EXPR, boolean_type_node,
+ indexse.expr, tmp);
fault =
- fold (build2 (TRUTH_OR_EXPR, boolean_type_node, fault, cond));
+ fold_build2 (TRUTH_OR_EXPR, boolean_type_node, fault, cond);
tmp = gfc_conv_array_ubound (se->expr, n);
- cond = fold (build2 (GT_EXPR, boolean_type_node,
- indexse.expr, tmp));
+ cond = fold_build2 (GT_EXPR, boolean_type_node,
+ indexse.expr, tmp);
fault =
- fold (build2 (TRUTH_OR_EXPR, boolean_type_node, fault, cond));
+ fold_build2 (TRUTH_OR_EXPR, boolean_type_node, fault, cond);
}
/* Multiply the index by the stride. */
stride = gfc_conv_array_stride (se->expr, n);
- tmp = fold (build2 (MULT_EXPR, gfc_array_index_type, indexse.expr,
- stride));
+ tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, indexse.expr,
+ stride);
/* And add it to the total. */
- index = fold (build2 (PLUS_EXPR, gfc_array_index_type, index, tmp));
+ index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, tmp);
}
if (flag_bounds_check)
@@ -1669,7 +1669,7 @@ gfc_conv_array_ref (gfc_se * se, gfc_array_ref * ar)
tmp = gfc_conv_array_offset (se->expr);
if (!integer_zerop (tmp))
- index = fold (build2 (PLUS_EXPR, gfc_array_index_type, index, tmp));
+ index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, tmp);
/* Access the calculated element. */
tmp = gfc_conv_array_data (se->expr);
@@ -1730,8 +1730,8 @@ gfc_trans_preloop_setup (gfc_loopinfo * loop, int dim, int flag,
stride);
gfc_add_block_to_block (pblock, &se.pre);
- info->offset = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- info->offset, index));
+ info->offset = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ info->offset, index);
info->offset = gfc_evaluate_now (info->offset, pblock);
}
@@ -1769,8 +1769,8 @@ gfc_trans_preloop_setup (gfc_loopinfo * loop, int dim, int flag,
index = gfc_conv_array_index_offset (&se, info, info->dim[i], i,
ar, stride);
gfc_add_block_to_block (pblock, &se.pre);
- info->offset = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- info->offset, index));
+ info->offset = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ info->offset, index);
info->offset = gfc_evaluate_now (info->offset, pblock);
}
@@ -2168,28 +2168,28 @@ gfc_conv_ss_startstride (gfc_loopinfo * loop)
/* Check lower bound. */
bound = gfc_conv_array_lbound (desc, dim);
tmp = info->start[n];
- tmp = fold (build2 (LT_EXPR, boolean_type_node, tmp, bound));
- fault = fold (build2 (TRUTH_OR_EXPR, boolean_type_node, fault,
- tmp));
+ tmp = fold_build2 (LT_EXPR, boolean_type_node, tmp, bound);
+ fault = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, fault,
+ tmp);
/* Check the upper bound. */
bound = gfc_conv_array_ubound (desc, dim);
end = gfc_conv_section_upper_bound (ss, n, &block);
- tmp = fold (build2 (GT_EXPR, boolean_type_node, end, bound));
- fault = fold (build2 (TRUTH_OR_EXPR, boolean_type_node, fault,
- tmp));
+ tmp = fold_build2 (GT_EXPR, boolean_type_node, end, bound);
+ fault = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, fault,
+ tmp);
/* Check the section sizes match. */
- tmp = fold (build2 (MINUS_EXPR, gfc_array_index_type, end,
- info->start[n]));
- tmp = fold (build2 (FLOOR_DIV_EXPR, gfc_array_index_type, tmp,
- info->stride[n]));
+ tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, end,
+ info->start[n]);
+ tmp = fold_build2 (FLOOR_DIV_EXPR, gfc_array_index_type, tmp,
+ info->stride[n]);
/* We remember the size of the first section, and check all the
others against this. */
if (size[n])
{
tmp =
- fold (build2 (NE_EXPR, boolean_type_node, tmp, size[n]));
+ fold_build2 (NE_EXPR, boolean_type_node, tmp, size[n]);
fault =
build2 (TRUTH_OR_EXPR, boolean_type_node, fault, tmp);
}
@@ -2467,10 +2467,10 @@ gfc_conv_loop_setup (gfc_loopinfo * loop)
/* To = from + (size - 1) * stride. */
tmp = gfc_conv_mpz_to_tree (i, gfc_index_integer_kind);
if (!integer_onep (info->stride[n]))
- tmp = fold (build2 (MULT_EXPR, gfc_array_index_type,
- tmp, info->stride[n]));
- loop->to[n] = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- loop->from[n], tmp));
+ tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
+ tmp, info->stride[n]);
+ loop->to[n] = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ loop->from[n], tmp);
}
else
{
@@ -2508,10 +2508,10 @@ gfc_conv_loop_setup (gfc_loopinfo * loop)
with start = 0, this simplifies to
last = end / step;
for (i = 0; i<=last; i++){...}; */
- tmp = fold (build2 (MINUS_EXPR, gfc_array_index_type,
- loop->to[n], loop->from[n]));
- tmp = fold (build2 (TRUNC_DIV_EXPR, gfc_array_index_type,
- tmp, info->stride[n]));
+ tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
+ loop->to[n], loop->from[n]);
+ tmp = fold_build2 (TRUNC_DIV_EXPR, gfc_array_index_type,
+ tmp, info->stride[n]);
loop->to[n] = gfc_evaluate_now (tmp, &loop->pre);
/* Make the loop variable start at 0. */
loop->from[n] = gfc_index_zero_node;
@@ -2563,12 +2563,12 @@ gfc_conv_loop_setup (gfc_loopinfo * loop)
{
/* Calculate the offset relative to the loop variable.
First multiply by the stride. */
- tmp = fold (build2 (MULT_EXPR, gfc_array_index_type,
- loop->from[n], info->stride[n]));
+ tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
+ loop->from[n], info->stride[n]);
/* Then subtract this from our starting value. */
- tmp = fold (build2 (MINUS_EXPR, gfc_array_index_type,
- info->start[n], tmp));
+ tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
+ info->start[n], tmp);
info->delta[n] = gfc_evaluate_now (tmp, &loop->pre);
}
@@ -2650,8 +2650,8 @@ gfc_array_init_size (tree descriptor, int rank, tree * poffset,
gfc_add_modify_expr (pblock, tmp, se.expr);
/* Work out the offset for this component. */
- tmp = fold (build2 (MULT_EXPR, gfc_array_index_type, se.expr, stride));
- offset = fold (build2 (MINUS_EXPR, gfc_array_index_type, offset, tmp));
+ tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, se.expr, stride);
+ offset = fold_build2 (MINUS_EXPR, gfc_array_index_type, offset, tmp);
/* Start the calculation for the size of this dimension. */
size = build2 (MINUS_EXPR, gfc_array_index_type,
@@ -2671,17 +2671,17 @@ gfc_array_init_size (tree descriptor, int rank, tree * poffset,
gfc_add_modify_expr (pblock, tmp, stride);
/* Calculate the size of this dimension. */
- size = fold (build2 (PLUS_EXPR, gfc_array_index_type, se.expr, size));
+ size = fold_build2 (PLUS_EXPR, gfc_array_index_type, se.expr, size);
/* Multiply the stride by the number of elements in this dimension. */
- stride = fold (build2 (MULT_EXPR, gfc_array_index_type, stride, size));
+ stride = fold_build2 (MULT_EXPR, gfc_array_index_type, stride, size);
stride = gfc_evaluate_now (stride, pblock);
}
/* The stride is the number of elements in the array, so multiply by the
size of an element to get the total size. */
tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
- size = fold (build2 (MULT_EXPR, gfc_array_index_type, stride, tmp));
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, stride, tmp);
if (poffset != NULL)
{
@@ -2957,8 +2957,8 @@ gfc_trans_array_bounds (tree type, gfc_symbol * sym, tree * poffset,
gfc_add_modify_expr (pblock, ubound, se.expr);
}
/* The offset of this dimension. offset = offset - lbound * stride. */
- tmp = fold (build2 (MULT_EXPR, gfc_array_index_type, lbound, size));
- offset = fold (build2 (MINUS_EXPR, gfc_array_index_type, offset, tmp));
+ tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, lbound, size);
+ offset = fold_build2 (MINUS_EXPR, gfc_array_index_type, offset, tmp);
/* The size of this dimension, and the stride of the next. */
if (dim + 1 < as->rank)
@@ -2969,10 +2969,10 @@ gfc_trans_array_bounds (tree type, gfc_symbol * sym, tree * poffset,
if (ubound != NULL_TREE && !(stride && INTEGER_CST_P (stride)))
{
/* Calculate stride = size * (ubound + 1 - lbound). */
- tmp = fold (build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, lbound));
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type, ubound, tmp));
- tmp = fold (build2 (MULT_EXPR, gfc_array_index_type, size, tmp));
+ tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
+ gfc_index_one_node, lbound);
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type, ubound, tmp);
+ tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
if (stride)
gfc_add_modify_expr (pblock, stride, tmp);
else
@@ -3045,7 +3045,7 @@ gfc_trans_auto_array_allocation (tree decl, gfc_symbol * sym, tree fnbody)
/* The size is the number of elements in the array, so multiply by the
size of an element to get the total size. */
tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
- size = fold (build2 (MULT_EXPR, gfc_array_index_type, size, tmp));
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
/* Allocate memory to hold the data. */
tmp = gfc_chainon_list (NULL_TREE, size);
@@ -3203,7 +3203,7 @@ gfc_trans_dummy_array_bias (gfc_symbol * sym, tree tmpdesc, tree body)
partial = gfc_create_var (boolean_type_node, "partial");
TREE_USED (partial) = 1;
tmp = gfc_conv_descriptor_stride (dumdesc, gfc_rank_cst[0]);
- tmp = fold (build2 (EQ_EXPR, boolean_type_node, tmp, integer_one_node));
+ tmp = fold_build2 (EQ_EXPR, boolean_type_node, tmp, integer_one_node);
gfc_add_modify_expr (&block, partial, tmp);
}
else
@@ -3304,11 +3304,11 @@ gfc_trans_dummy_array_bias (gfc_symbol * sym, tree tmpdesc, tree body)
{
/* Check (ubound(a) - lbound(a) == ubound(b) - lbound(b)). */
- tmp = fold (build2 (MINUS_EXPR, gfc_array_index_type,
- ubound, lbound));
+ tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
+ ubound, lbound);
stride = build2 (MINUS_EXPR, gfc_array_index_type,
dubound, dlbound);
- tmp = fold (build2 (NE_EXPR, gfc_array_index_type, tmp, stride));
+ tmp = fold_build2 (NE_EXPR, gfc_array_index_type, tmp, stride);
gfc_trans_runtime_check (tmp, gfc_strconst_bounds, &block);
}
}
@@ -3317,12 +3317,12 @@ gfc_trans_dummy_array_bias (gfc_symbol * sym, tree tmpdesc, tree body)
/* For assumed shape arrays move the upper bound by the same amount
as the lower bound. */
tmp = build2 (MINUS_EXPR, gfc_array_index_type, dubound, dlbound);
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type, tmp, lbound));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type, tmp, lbound);
gfc_add_modify_expr (&block, ubound, tmp);
}
/* The offset of this dimension. offset = offset - lbound * stride. */
- tmp = fold (build2 (MULT_EXPR, gfc_array_index_type, lbound, stride));
- offset = fold (build2 (MINUS_EXPR, gfc_array_index_type, offset, tmp));
+ tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, lbound, stride);
+ offset = fold_build2 (MINUS_EXPR, gfc_array_index_type, offset, tmp);
/* The size of this dimension, and the stride of the next. */
if (n + 1 < sym->as->rank)
@@ -3343,12 +3343,12 @@ gfc_trans_dummy_array_bias (gfc_symbol * sym, tree tmpdesc, tree body)
else
{
/* Calculate stride = size * (ubound + 1 - lbound). */
- tmp = fold (build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, lbound));
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- ubound, tmp));
- size = fold (build2 (MULT_EXPR, gfc_array_index_type,
- size, tmp));
+ tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
+ gfc_index_one_node, lbound);
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ ubound, tmp);
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type,
+ size, tmp);
stmt_packed = size;
}
@@ -3759,10 +3759,10 @@ gfc_conv_expr_descriptor (gfc_se * se, gfc_expr * expr, gfc_ss * ss)
}
tmp = gfc_conv_array_lbound (desc, n);
- tmp = fold (build2 (MINUS_EXPR, TREE_TYPE (tmp), start, tmp));
+ tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), start, tmp);
- tmp = fold (build2 (MULT_EXPR, TREE_TYPE (tmp), tmp, stride));
- offset = fold (build2 (PLUS_EXPR, TREE_TYPE (tmp), offset, tmp));
+ tmp = fold_build2 (MULT_EXPR, TREE_TYPE (tmp), tmp, stride);
+ offset = fold_build2 (PLUS_EXPR, TREE_TYPE (tmp), offset, tmp);
if (info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
{
@@ -3779,9 +3779,9 @@ gfc_conv_expr_descriptor (gfc_se * se, gfc_expr * expr, gfc_ss * ss)
if (!integer_onep (from))
{
/* Make sure the new section starts at 1. */
- tmp = fold (build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, from));
- to = fold (build2 (PLUS_EXPR, gfc_array_index_type, to, tmp));
+ tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
+ gfc_index_one_node, from);
+ to = fold_build2 (PLUS_EXPR, gfc_array_index_type, to, tmp);
from = gfc_index_one_node;
}
tmp = gfc_conv_descriptor_lbound (parm, gfc_rank_cst[dim]);
@@ -3793,12 +3793,12 @@ gfc_conv_expr_descriptor (gfc_se * se, gfc_expr * expr, gfc_ss * ss)
/* Multiply the stride by the section stride to get the
total stride. */
- stride = fold (build2 (MULT_EXPR, gfc_array_index_type,
- stride, info->stride[dim]));
+ stride = fold_build2 (MULT_EXPR, gfc_array_index_type,
+ stride, info->stride[dim]);
if (se->direct_byref)
- base = fold (build2 (MINUS_EXPR, TREE_TYPE (base),
- base, stride));
+ base = fold_build2 (MINUS_EXPR, TREE_TYPE (base),
+ base, stride);
/* Store the new stride. */
tmp = gfc_conv_descriptor_stride (parm, gfc_rank_cst[dim]);
diff --git a/gcc/fortran/trans-expr.c b/gcc/fortran/trans-expr.c
index b79d0743dec..2db5fbdc7bb 100644
--- a/gcc/fortran/trans-expr.c
+++ b/gcc/fortran/trans-expr.c
@@ -513,7 +513,7 @@ gfc_conv_powi (gfc_se * se, int n, tree * tmpvar)
op1 = op0;
}
- tmp = fold (build2 (MULT_EXPR, TREE_TYPE (op0), op0, op1));
+ tmp = fold_build2 (MULT_EXPR, TREE_TYPE (op0), op0, op1);
tmp = gfc_evaluate_now (tmp, &se->pre);
if (n < POWI_TABLE_SIZE)
@@ -738,9 +738,8 @@ gfc_conv_string_tmp (gfc_se * se, tree type, tree len)
if (gfc_can_put_var_on_stack (len))
{
/* Create a temporary variable to hold the result. */
- tmp = fold (build2 (MINUS_EXPR, gfc_charlen_type_node, len,
- convert (gfc_charlen_type_node,
- integer_one_node)));
+ tmp = fold_build2 (MINUS_EXPR, gfc_charlen_type_node, len,
+ convert (gfc_charlen_type_node, integer_one_node));
tmp = build_range_type (gfc_array_index_type, gfc_index_zero_node, tmp);
tmp = build_array_type (gfc_character1_type_node, tmp);
var = gfc_create_var (tmp, "str");
@@ -797,8 +796,8 @@ gfc_conv_concat_op (gfc_se * se, gfc_expr * expr)
len = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
if (len == NULL_TREE)
{
- len = fold (build2 (PLUS_EXPR, TREE_TYPE (lse.string_length),
- lse.string_length, rse.string_length));
+ len = fold_build2 (PLUS_EXPR, TREE_TYPE (lse.string_length),
+ lse.string_length, rse.string_length);
}
type = build_pointer_type (type);
@@ -990,11 +989,11 @@ gfc_conv_expr_op (gfc_se * se, gfc_expr * expr)
if (lop)
{
/* The result of logical ops is always boolean_type_node. */
- tmp = fold (build2 (code, type, lse.expr, rse.expr));
+ tmp = fold_build2 (code, type, lse.expr, rse.expr);
se->expr = convert (type, tmp);
}
else
- se->expr = fold (build2 (code, type, lse.expr, rse.expr));
+ se->expr = fold_build2 (code, type, lse.expr, rse.expr);
/* Add the post blocks. */
gfc_add_block_to_block (&se->post, &rse.post);
diff --git a/gcc/fortran/trans-intrinsic.c b/gcc/fortran/trans-intrinsic.c
index 20bddbd17d7..45581745b44 100644
--- a/gcc/fortran/trans-intrinsic.c
+++ b/gcc/fortran/trans-intrinsic.c
@@ -264,11 +264,11 @@ build_round_expr (stmtblock_t * pblock, tree arg, tree type)
neg = build_real (argtype, r);
tmp = gfc_build_const (argtype, integer_zero_node);
- cond = fold (build2 (GT_EXPR, boolean_type_node, arg, tmp));
+ cond = fold_build2 (GT_EXPR, boolean_type_node, arg, tmp);
- tmp = fold (build3 (COND_EXPR, argtype, cond, pos, neg));
- tmp = fold (build2 (PLUS_EXPR, argtype, arg, tmp));
- return fold (build1 (FIX_TRUNC_EXPR, type, tmp));
+ tmp = fold_build3 (COND_EXPR, argtype, cond, pos, neg);
+ tmp = fold_build2 (PLUS_EXPR, argtype, arg, tmp);
+ return fold_build1 (FIX_TRUNC_EXPR, type, tmp);
}
@@ -645,8 +645,8 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper)
gcc_assert (se->ss->expr == expr);
gfc_advance_se_ss_chain (se);
bound = se->loop->loopvar[0];
- bound = fold (build2 (MINUS_EXPR, gfc_array_index_type, bound,
- se->loop->from[0]));
+ bound = fold_build2 (MINUS_EXPR, gfc_array_index_type, bound,
+ se->loop->from[0]);
}
else
{
@@ -657,8 +657,8 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper)
gfc_add_block_to_block (&se->pre, &argse.pre);
bound = argse.expr;
/* Convert from one based to zero based. */
- bound = fold (build2 (MINUS_EXPR, gfc_array_index_type, bound,
- gfc_index_one_node));
+ bound = fold_build2 (MINUS_EXPR, gfc_array_index_type, bound,
+ gfc_index_one_node);
}
/* TODO: don't re-evaluate the descriptor on each iteration. */
@@ -683,11 +683,11 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper)
if (flag_bounds_check)
{
bound = gfc_evaluate_now (bound, &se->pre);
- cond = fold (build2 (LT_EXPR, boolean_type_node,
- bound, build_int_cst (TREE_TYPE (bound), 0)));
+ cond = fold_build2 (LT_EXPR, boolean_type_node,
+ bound, build_int_cst (TREE_TYPE (bound), 0));
tmp = gfc_rank_cst[GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc))];
- tmp = fold (build2 (GE_EXPR, boolean_type_node, bound, tmp));
- cond = fold(build2 (TRUTH_ORIF_EXPR, boolean_type_node, cond, tmp));
+ tmp = fold_build2 (GE_EXPR, boolean_type_node, bound, tmp);
+ cond = fold_build2 (TRUTH_ORIF_EXPR, boolean_type_node, cond, tmp);
gfc_trans_runtime_check (cond, gfc_strconst_fault, &se->pre);
}
}
@@ -765,7 +765,7 @@ gfc_conv_intrinsic_cmplx (gfc_se * se, gfc_expr * expr, int both)
else
imag = build_real_from_int_cst (TREE_TYPE (type), integer_zero_node);
- se->expr = fold (build2 (COMPLEX_EXPR, type, real, imag));
+ se->expr = fold_build2 (COMPLEX_EXPR, type, real, imag);
}
/* Remainder function MOD(A, P) = A - INT(A / P) * P
@@ -903,11 +903,11 @@ gfc_conv_intrinsic_sign (gfc_se * se, gfc_expr * expr)
type = TREE_TYPE (arg);
zero = gfc_build_const (type, integer_zero_node);
- testa = fold (build2 (GE_EXPR, boolean_type_node, arg, zero));
- testb = fold (build2 (GE_EXPR, boolean_type_node, arg2, zero));
- tmp = fold (build2 (TRUTH_XOR_EXPR, boolean_type_node, testa, testb));
- se->expr = fold (build3 (COND_EXPR, type, tmp,
- build1 (NEGATE_EXPR, type, arg), arg));
+ testa = fold_build2 (GE_EXPR, boolean_type_node, arg, zero);
+ testb = fold_build2 (GE_EXPR, boolean_type_node, arg2, zero);
+ tmp = fold_build2 (TRUTH_XOR_EXPR, boolean_type_node, testa, testb);
+ se->expr = fold_build3 (COND_EXPR, type, tmp,
+ build1 (NEGATE_EXPR, type, arg), arg);
}
@@ -1433,7 +1433,7 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, int op)
/* Most negative(+HUGE) for maxval, most negative (-HUGE) for minval. */
if (op == GT_EXPR)
- tmp = fold (build1 (NEGATE_EXPR, TREE_TYPE (tmp), tmp));
+ tmp = fold_build1 (NEGATE_EXPR, TREE_TYPE (tmp), tmp);
gfc_add_modify_expr (&se->pre, limit, tmp);
/* Initialize the scalarizer. */
@@ -1452,12 +1452,12 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, int op)
size we need to return zero. Otherwise use the first element of the
array, in case all elements are equal to the limit.
i.e. pos = (ubound >= lbound) ? lbound, lbound - 1; */
- tmp = fold (build2 (MINUS_EXPR, gfc_array_index_type,
- loop.from[0], gfc_index_one_node));
- cond = fold (build2 (GE_EXPR, boolean_type_node,
- loop.to[0], loop.from[0]));
- tmp = fold (build3 (COND_EXPR, gfc_array_index_type, cond,
- loop.from[0], tmp));
+ tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
+ loop.from[0], gfc_index_one_node);
+ cond = fold_build2 (GE_EXPR, boolean_type_node,
+ loop.to[0], loop.from[0]);
+ tmp = fold_build3 (COND_EXPR, gfc_array_index_type, cond,
+ loop.from[0], tmp);
gfc_add_modify_expr (&loop.pre, pos, tmp);
gfc_mark_ss_chain_used (arrayss, 1);
@@ -1521,9 +1521,9 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, int op)
gfc_cleanup_loop (&loop);
/* Return a value in the range 1..SIZE(array). */
- tmp = fold (build2 (MINUS_EXPR, gfc_array_index_type, loop.from[0],
- gfc_index_one_node));
- tmp = fold (build2 (MINUS_EXPR, gfc_array_index_type, pos, tmp));
+ tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, loop.from[0],
+ gfc_index_one_node);
+ tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, pos, tmp);
/* And convert to the required type. */
se->expr = convert (type, tmp);
}
@@ -1573,7 +1573,7 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, int op)
/* Most negative(-HUGE) for maxval, most positive (-HUGE) for minval. */
if (op == GT_EXPR)
- tmp = fold (build1 (NEGATE_EXPR, TREE_TYPE (tmp), tmp));
+ tmp = fold_build1 (NEGATE_EXPR, TREE_TYPE (tmp), tmp);
gfc_add_modify_expr (&se->pre, limit, tmp);
/* Walk the arguments. */
@@ -1670,8 +1670,8 @@ gfc_conv_intrinsic_btest (gfc_se * se, gfc_expr * expr)
tmp = build2 (LSHIFT_EXPR, type, build_int_cst (type, 1), arg2);
tmp = build2 (BIT_AND_EXPR, type, arg, tmp);
- tmp = fold (build2 (NE_EXPR, boolean_type_node, tmp,
- build_int_cst (type, 0)));
+ tmp = fold_build2 (NE_EXPR, boolean_type_node, tmp,
+ build_int_cst (type, 0));
type = gfc_typenode_for_spec (&expr->ts);
se->expr = convert (type, tmp);
}
@@ -1689,7 +1689,7 @@ gfc_conv_intrinsic_bitop (gfc_se * se, gfc_expr * expr, int op)
arg = TREE_VALUE (arg);
type = TREE_TYPE (arg);
- se->expr = fold (build2 (op, type, arg, arg2));
+ se->expr = fold_build2 (op, type, arg, arg2);
}
/* Bitwise not. */
@@ -1719,15 +1719,15 @@ gfc_conv_intrinsic_singlebitop (gfc_se * se, gfc_expr * expr, int set)
arg = TREE_VALUE (arg);
type = TREE_TYPE (arg);
- tmp = fold (build2 (LSHIFT_EXPR, type, build_int_cst (type, 1), arg2));
+ tmp = fold_build2 (LSHIFT_EXPR, type, build_int_cst (type, 1), arg2);
if (set)
op = BIT_IOR_EXPR;
else
{
op = BIT_AND_EXPR;
- tmp = fold (build1 (BIT_NOT_EXPR, type, tmp));
+ tmp = fold_build1 (BIT_NOT_EXPR, type, tmp);
}
- se->expr = fold (build2 (op, type, arg, tmp));
+ se->expr = fold_build2 (op, type, arg, tmp);
}
/* Extract a sequence of bits.
@@ -1755,7 +1755,7 @@ gfc_conv_intrinsic_ibits (gfc_se * se, gfc_expr * expr)
tmp = build2 (RSHIFT_EXPR, type, arg, arg2);
- se->expr = fold (build2 (BIT_AND_EXPR, type, tmp, mask));
+ se->expr = fold_build2 (BIT_AND_EXPR, type, tmp, mask);
}
/* ISHFT (I, SHIFT) = (abs (shift) >= BIT_SIZE (i))
@@ -1782,10 +1782,10 @@ gfc_conv_intrinsic_ishft (gfc_se * se, gfc_expr * expr)
type = TREE_TYPE (arg);
utype = gfc_unsigned_type (type);
- width = fold (build1 (ABS_EXPR, TREE_TYPE (arg2), arg2));
+ width = fold_build1 (ABS_EXPR, TREE_TYPE (arg2), arg2);
/* Left shift if positive. */
- lshift = fold (build2 (LSHIFT_EXPR, type, arg, width));
+ lshift = fold_build2 (LSHIFT_EXPR, type, arg, width);
/* Right shift if negative.
We convert to an unsigned type because we want a logical shift.
@@ -1795,18 +1795,18 @@ gfc_conv_intrinsic_ishft (gfc_se * se, gfc_expr * expr)
rshift = fold_convert (type, build2 (RSHIFT_EXPR, utype,
convert (utype, arg), width));
- tmp = fold (build2 (GE_EXPR, boolean_type_node, arg2,
- build_int_cst (TREE_TYPE (arg2), 0)));
- tmp = fold (build3 (COND_EXPR, type, tmp, lshift, rshift));
+ tmp = fold_build2 (GE_EXPR, boolean_type_node, arg2,
+ build_int_cst (TREE_TYPE (arg2), 0));
+ tmp = fold_build3 (COND_EXPR, type, tmp, lshift, rshift);
/* The Fortran standard allows shift widths <= BIT_SIZE(I), whereas
gcc requires a shift width < BIT_SIZE(I), so we have to catch this
special case. */
num_bits = build_int_cst (TREE_TYPE (arg2), TYPE_PRECISION (type));
- cond = fold (build2 (GE_EXPR, boolean_type_node, width, num_bits));
+ cond = fold_build2 (GE_EXPR, boolean_type_node, width, num_bits);
- se->expr = fold (build3 (COND_EXPR, type, cond,
- build_int_cst (type, 0), tmp));
+ se->expr = fold_build3 (COND_EXPR, type, cond,
+ build_int_cst (type, 0), tmp);
}
/* Circular shift. AKA rotate or barrel shift. */
@@ -1872,19 +1872,19 @@ gfc_conv_intrinsic_ishftc (gfc_se * se, gfc_expr * expr)
type = TREE_TYPE (arg);
/* Rotate left if positive. */
- lrot = fold (build2 (LROTATE_EXPR, type, arg, arg2));
+ lrot = fold_build2 (LROTATE_EXPR, type, arg, arg2);
/* Rotate right if negative. */
- tmp = fold (build1 (NEGATE_EXPR, TREE_TYPE (arg2), arg2));
- rrot = fold (build2 (RROTATE_EXPR, type, arg, tmp));
+ tmp = fold_build1 (NEGATE_EXPR, TREE_TYPE (arg2), arg2);
+ rrot = fold_build2 (RROTATE_EXPR, type, arg, tmp);
zero = build_int_cst (TREE_TYPE (arg2), 0);
- tmp = fold (build2 (GT_EXPR, boolean_type_node, arg2, zero));
- rrot = fold (build3 (COND_EXPR, type, tmp, lrot, rrot));
+ tmp = fold_build2 (GT_EXPR, boolean_type_node, arg2, zero);
+ rrot = fold_build3 (COND_EXPR, type, tmp, lrot, rrot);
/* Do nothing if shift == 0. */
- tmp = fold (build2 (EQ_EXPR, boolean_type_node, arg2, zero));
- se->expr = fold (build3 (COND_EXPR, type, tmp, arg, rrot));
+ tmp = fold_build2 (EQ_EXPR, boolean_type_node, arg2, zero);
+ se->expr = fold_build3 (COND_EXPR, type, tmp, arg, rrot);
}
/* The length of a character string. */
@@ -2037,7 +2037,7 @@ gfc_conv_intrinsic_merge (gfc_se * se, gfc_expr * expr)
se->string_length = len;
}
type = TREE_TYPE (tsource);
- se->expr = fold (build3 (COND_EXPR, type, mask, tsource, fsource));
+ se->expr = fold_build3 (COND_EXPR, type, mask, tsource, fsource);
}
@@ -2374,18 +2374,18 @@ prepare_arg_info (gfc_se * se, gfc_expr * expr,
rcs->fdigits = convert (masktype, tmp);
wbits = build_int_cst (NULL_TREE, TYPE_PRECISION (rcs->type) - 1);
wbits = convert (masktype, wbits);
- rcs->edigits = fold (build2 (MINUS_EXPR, masktype, wbits, tmp));
+ rcs->edigits = fold_build2 (MINUS_EXPR, masktype, wbits, tmp);
/* Form masks for exponent/fraction/sign */
one = gfc_build_const (masktype, integer_one_node);
- rcs->smask = fold (build2 (LSHIFT_EXPR, masktype, one, wbits));
- rcs->f1 = fold (build2 (LSHIFT_EXPR, masktype, one, rcs->fdigits));
- rcs->emask = fold (build2 (MINUS_EXPR, masktype, rcs->smask, rcs->f1));
- rcs->fmask = fold (build2 (MINUS_EXPR, masktype, rcs->f1, one));
+ rcs->smask = fold_build2 (LSHIFT_EXPR, masktype, one, wbits);
+ rcs->f1 = fold_build2 (LSHIFT_EXPR, masktype, one, rcs->fdigits);
+ rcs->emask = fold_build2 (MINUS_EXPR, masktype, rcs->smask, rcs->f1);
+ rcs->fmask = fold_build2 (MINUS_EXPR, masktype, rcs->f1, one);
/* Form bias. */
- tmp = fold (build2 (MINUS_EXPR, masktype, rcs->edigits, one));
- tmp = fold (build2 (LSHIFT_EXPR, masktype, one, tmp));
- rcs->bias = fold (build2 (MINUS_EXPR, masktype, tmp ,one));
+ tmp = fold_build2 (MINUS_EXPR, masktype, rcs->edigits, one);
+ tmp = fold_build2 (LSHIFT_EXPR, masktype, one, tmp);
+ rcs->bias = fold_build2 (MINUS_EXPR, masktype, tmp ,one);
if (all)
{
@@ -2510,7 +2510,7 @@ gfc_conv_intrinsic_rrspacing (gfc_se * se, gfc_expr * expr)
fraction = rcs.frac;
one = gfc_build_const (masktype, integer_one_node);
zero = gfc_build_const (masktype, integer_zero_node);
- t2 = fold (build2 (PLUS_EXPR, masktype, rcs.edigits, one));
+ t2 = fold_build2 (PLUS_EXPR, masktype, rcs.edigits, one);
t1 = call_builtin_clz (masktype, fraction);
tmp = build2 (PLUS_EXPR, masktype, t1, one);
@@ -2519,8 +2519,8 @@ gfc_conv_intrinsic_rrspacing (gfc_se * se, gfc_expr * expr)
cond = build2 (EQ_EXPR, boolean_type_node, rcs.expn, zero);
fraction = build3 (COND_EXPR, masktype, cond, tmp, fraction);
- tmp = fold (build2 (PLUS_EXPR, masktype, rcs.bias, fdigits));
- tmp = fold (build2 (LSHIFT_EXPR, masktype, tmp, fdigits));
+ tmp = fold_build2 (PLUS_EXPR, masktype, rcs.bias, fdigits);
+ tmp = fold_build2 (LSHIFT_EXPR, masktype, tmp, fdigits);
tmp = build2 (BIT_IOR_EXPR, masktype, tmp, fraction);
cond2 = build2 (EQ_EXPR, boolean_type_node, rcs.frac, zero);
@@ -2634,7 +2634,7 @@ gfc_conv_intrinsic_repeat (gfc_se * se, gfc_expr * expr)
len = TREE_VALUE (args);
tmp = gfc_advance_chain (args, 2);
ncopies = TREE_VALUE (tmp);
- len = fold (build2 (MULT_EXPR, gfc_int4_type_node, len, ncopies));
+ len = fold_build2 (MULT_EXPR, gfc_int4_type_node, len, ncopies);
type = gfc_get_character_type (expr->ts.kind, expr->ts.cl);
var = gfc_conv_string_tmp (se, build_pointer_type (type), len);
diff --git a/gcc/fortran/trans-stmt.c b/gcc/fortran/trans-stmt.c
index 524412725ab..3adf86879e9 100644
--- a/gcc/fortran/trans-stmt.c
+++ b/gcc/fortran/trans-stmt.c
@@ -587,9 +587,9 @@ gfc_trans_simple_do (gfc_code * code, stmtblock_t *pblock, tree dovar,
/* Only execute the loop if the number of iterations is positive. */
if (tree_int_cst_sgn (step) > 0)
- cond = fold (build2 (LE_EXPR, boolean_type_node, dovar, to));
+ cond = fold_build2 (LE_EXPR, boolean_type_node, dovar, to);
else
- cond = fold (build2 (GE_EXPR, boolean_type_node, dovar, to));
+ cond = fold_build2 (GE_EXPR, boolean_type_node, dovar, to);
tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt ());
gfc_add_expr_to_block (pblock, tmp);
@@ -685,11 +685,11 @@ gfc_trans_do (gfc_code * code)
/* Initialize loop count. This code is executed before we enter the
loop body. We generate: count = (to + step - from) / step. */
- tmp = fold (build2 (MINUS_EXPR, type, step, from));
- tmp = fold (build2 (PLUS_EXPR, type, to, tmp));
+ tmp = fold_build2 (MINUS_EXPR, type, step, from);
+ tmp = fold_build2 (PLUS_EXPR, type, to, tmp);
if (TREE_CODE (type) == INTEGER_TYPE)
{
- tmp = fold (build2 (TRUNC_DIV_EXPR, type, tmp, step));
+ tmp = fold_build2 (TRUNC_DIV_EXPR, type, tmp, step);
count = gfc_create_var (type, "count");
}
else
@@ -697,8 +697,8 @@ gfc_trans_do (gfc_code * code)
/* TODO: We could use the same width as the real type.
This would probably cause more problems that it solves
when we implement "long double" types. */
- tmp = fold (build2 (RDIV_EXPR, type, tmp, step));
- tmp = fold (build1 (FIX_TRUNC_EXPR, gfc_array_index_type, tmp));
+ tmp = fold_build2 (RDIV_EXPR, type, tmp, step);
+ tmp = fold_build1 (FIX_TRUNC_EXPR, gfc_array_index_type, tmp);
count = gfc_create_var (gfc_array_index_type, "count");
}
gfc_add_modify_expr (&block, count, tmp);
@@ -810,7 +810,7 @@ gfc_trans_do_while (gfc_code * code)
gfc_init_se (&cond, NULL);
gfc_conv_expr_val (&cond, code->expr);
gfc_add_block_to_block (&block, &cond.pre);
- cond.expr = fold (build1 (TRUTH_NOT_EXPR, boolean_type_node, cond.expr));
+ cond.expr = fold_build1 (TRUTH_NOT_EXPR, boolean_type_node, cond.expr);
/* Build "IF (! cond) GOTO exit_label". */
tmp = build1_v (GOTO_EXPR, exit_label);
@@ -1388,9 +1388,9 @@ gfc_trans_forall_loop (forall_info *forall_tmp, int nvar, tree body, int mask_fl
gfc_add_modify_expr (&block, var, start);
/* Initialize the loop counter. */
- tmp = fold (build2 (MINUS_EXPR, TREE_TYPE (var), step, start));
- tmp = fold (build2 (PLUS_EXPR, TREE_TYPE (var), end, tmp));
- tmp = fold (build2 (TRUNC_DIV_EXPR, TREE_TYPE (var), tmp, step));
+ tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (var), step, start);
+ tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (var), end, tmp);
+ tmp = fold_build2 (TRUNC_DIV_EXPR, TREE_TYPE (var), tmp, step);
gfc_add_modify_expr (&block, count, tmp);
/* The loop expression. */
@@ -1479,8 +1479,8 @@ gfc_do_allocate (tree bytesize, tree size, tree * pdata, stmtblock_t * pblock,
if (INTEGER_CST_P (size))
{
- tmp = fold (build2 (MINUS_EXPR, gfc_array_index_type, size,
- gfc_index_one_node));
+ tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, size,
+ gfc_index_one_node);
}
else
tmp = NULL_TREE;
@@ -1548,7 +1548,7 @@ generate_loop_for_temp_to_lhs (gfc_expr *expr, tree tmp1, tree size,
gfc_add_block_to_block (&block, &lse.post);
/* Increment the count1. */
- tmp = fold (build2 (PLUS_EXPR, TREE_TYPE (count1), count1, size));
+ tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (count1), count1, size);
gfc_add_modify_expr (&block, count1, tmp);
tmp = gfc_finish_block (&block);
}
@@ -1582,8 +1582,8 @@ generate_loop_for_temp_to_lhs (gfc_expr *expr, tree tmp1, tree size,
/* Form the expression of the temporary. */
if (lss != gfc_ss_terminator)
{
- index = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- count1, count2));
+ index = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count1, count2);
rse.expr = gfc_build_array_ref (tmp1, index);
}
/* Translate expr. */
@@ -1610,15 +1610,15 @@ generate_loop_for_temp_to_lhs (gfc_expr *expr, tree tmp1, tree size,
gfc_add_expr_to_block (&body, tmp);
/* Increment count2. */
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- count2, gfc_index_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count2, gfc_index_one_node);
gfc_add_modify_expr (&body, count2, tmp);
/* Increment count3. */
if (count3)
{
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- count3, gfc_index_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count3, gfc_index_one_node);
gfc_add_modify_expr (&body, count3, tmp);
}
@@ -1629,7 +1629,7 @@ generate_loop_for_temp_to_lhs (gfc_expr *expr, tree tmp1, tree size,
gfc_cleanup_loop (&loop1);
/* Increment count1. */
- tmp = fold (build2 (PLUS_EXPR, TREE_TYPE (count1), count1, size));
+ tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (count1), count1, size);
gfc_add_modify_expr (&block, count1, tmp);
tmp = gfc_finish_block (&block);
}
@@ -1689,7 +1689,7 @@ generate_loop_for_rhs_to_temp (gfc_expr *expr2, tree tmp1, tree size,
gfc_conv_expr (&rse, expr2);
/* Form the expression of the temporary. */
- index = fold (build2 (PLUS_EXPR, gfc_array_index_type, count1, count2));
+ index = fold_build2 (PLUS_EXPR, gfc_array_index_type, count1, count2);
lse.expr = gfc_build_array_ref (tmp1, index);
}
@@ -1720,15 +1720,15 @@ generate_loop_for_rhs_to_temp (gfc_expr *expr2, tree tmp1, tree size,
else
{
/* Increment count2. */
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- count2, gfc_index_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count2, gfc_index_one_node);
gfc_add_modify_expr (&body1, count2, tmp);
/* Increment count3. */
if (count3)
{
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- count3, gfc_index_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count3, gfc_index_one_node);
gfc_add_modify_expr (&body1, count3, tmp);
}
@@ -1743,7 +1743,7 @@ generate_loop_for_rhs_to_temp (gfc_expr *expr2, tree tmp1, tree size,
as tree nodes in SS may not be valid in different scope. */
}
/* Increment count1. */
- tmp = fold (build2 (PLUS_EXPR, TREE_TYPE (count1), count1, size));
+ tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (count1), count1, size);
gfc_add_modify_expr (&block, count1, tmp);
tmp = gfc_finish_block (&block);
@@ -1800,11 +1800,11 @@ compute_inner_temp_size (gfc_expr *expr1, gfc_expr *expr2,
/* Figure out how many elements we need. */
for (i = 0; i < loop.dimen; i++)
{
- tmp = fold (build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, loop.from[i]));
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- tmp, loop.to[i]));
- size = fold (build2 (MULT_EXPR, gfc_array_index_type, size, tmp));
+ tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
+ gfc_index_one_node, loop.from[i]);
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ tmp, loop.to[i]);
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
}
gfc_add_block_to_block (pblock, &loop.pre);
size = gfc_evaluate_now (size, pblock);
@@ -1868,7 +1868,7 @@ allocate_temp_for_forall_nest (forall_info * nested_forall_info, tree type,
size = compute_overall_iter_number (nested_forall_info, inner_size, block);
unit = TYPE_SIZE_UNIT (type);
- bytesize = fold (build2 (MULT_EXPR, gfc_array_index_type, size, unit));
+ bytesize = fold_build2 (MULT_EXPR, gfc_array_index_type, size, unit);
*ptemp1 = NULL;
temp1 = gfc_do_allocate (bytesize, size, ptemp1, block, type);
@@ -2033,8 +2033,8 @@ gfc_trans_pointer_assign_need_temp (gfc_expr * expr1, gfc_expr * expr2,
gfc_add_block_to_block (&body, &rse.post);
/* Increment count. */
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- count, gfc_index_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count, gfc_index_one_node);
gfc_add_modify_expr (&body, count, tmp);
tmp = gfc_finish_block (&body);
@@ -2078,8 +2078,8 @@ gfc_trans_pointer_assign_need_temp (gfc_expr * expr1, gfc_expr * expr2,
gfc_add_modify_expr (&body, lse.expr, rse.expr);
gfc_add_block_to_block (&body, &lse.post);
/* Increment count. */
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- count, gfc_index_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count, gfc_index_one_node);
gfc_add_modify_expr (&body, count, tmp);
tmp = gfc_finish_block (&body);
@@ -2122,8 +2122,8 @@ gfc_trans_pointer_assign_need_temp (gfc_expr * expr1, gfc_expr * expr2,
gfc_add_block_to_block (&body, &lse.post);
/* Increment count. */
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- count, gfc_index_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count, gfc_index_one_node);
gfc_add_modify_expr (&body, count, tmp);
tmp = gfc_finish_block (&body);
@@ -2167,8 +2167,8 @@ gfc_trans_pointer_assign_need_temp (gfc_expr * expr1, gfc_expr * expr2,
gfc_add_block_to_block (&body, &lse.post);
/* Increment count. */
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- count, gfc_index_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count, gfc_index_one_node);
gfc_add_modify_expr (&body, count, tmp);
tmp = gfc_finish_block (&body);
@@ -2345,14 +2345,14 @@ gfc_trans_forall_1 (gfc_code * code, forall_info * nested_forall_info)
lenvar = NULL_TREE;
/* size = (end + step - start) / step. */
- tmp = fold (build2 (MINUS_EXPR, TREE_TYPE (start[n]),
- step[n], start[n]));
- tmp = fold (build2 (PLUS_EXPR, TREE_TYPE (end[n]), end[n], tmp));
+ tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (start[n]),
+ step[n], start[n]);
+ tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (end[n]), end[n], tmp);
- tmp = fold (build2 (FLOOR_DIV_EXPR, TREE_TYPE (tmp), tmp, step[n]));
+ tmp = fold_build2 (FLOOR_DIV_EXPR, TREE_TYPE (tmp), tmp, step[n]);
tmp = convert (gfc_array_index_type, tmp);
- size = fold (build2 (MULT_EXPR, gfc_array_index_type, size, tmp));
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
}
/* Record the nvar and size of current forall level. */
@@ -2376,8 +2376,8 @@ gfc_trans_forall_1 (gfc_code * code, forall_info * nested_forall_info)
if (code->expr)
{
/* Allocate the mask temporary. */
- bytesize = fold (build2 (MULT_EXPR, gfc_array_index_type, size,
- TYPE_SIZE_UNIT (boolean_type_node)));
+ bytesize = fold_build2 (MULT_EXPR, gfc_array_index_type, size,
+ TYPE_SIZE_UNIT (boolean_type_node));
mask = gfc_do_allocate (bytesize, size, &pmask, &block, boolean_type_node);
@@ -2658,8 +2658,8 @@ gfc_evaluate_where_mask (gfc_expr * me, forall_info * nested_forall_info,
else
{
/* Increment count. */
- tmp1 = fold (build2 (PLUS_EXPR, gfc_array_index_type, count,
- gfc_index_one_node));
+ tmp1 = fold_build2 (PLUS_EXPR, gfc_array_index_type, count,
+ gfc_index_one_node);
gfc_add_modify_expr (&body1, count, tmp1);
/* Generate the copying loops. */
@@ -2825,8 +2825,8 @@ gfc_trans_where_assign (gfc_expr *expr1, gfc_expr *expr2, tree mask,
if (lss == gfc_ss_terminator)
{
/* Increment count1. */
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- count1, gfc_index_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count1, gfc_index_one_node);
gfc_add_modify_expr (&body, count1, tmp);
/* Use the scalar assignment as is. */
@@ -2841,8 +2841,8 @@ gfc_trans_where_assign (gfc_expr *expr1, gfc_expr *expr2, tree mask,
{
/* Increment count1 before finish the main body of a scalarized
expression. */
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- count1, gfc_index_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count1, gfc_index_one_node);
gfc_add_modify_expr (&body, count1, tmp);
gfc_trans_scalarized_loop_boundary (&loop, &body);
@@ -2884,15 +2884,15 @@ gfc_trans_where_assign (gfc_expr *expr1, gfc_expr *expr2, tree mask,
gfc_add_expr_to_block (&body, tmp);
/* Increment count2. */
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- count2, gfc_index_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count2, gfc_index_one_node);
gfc_add_modify_expr (&body, count2, tmp);
}
else
{
/* Increment count1. */
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type,
- count1, gfc_index_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count1, gfc_index_one_node);
gfc_add_modify_expr (&body, count1, tmp);
}
diff --git a/gcc/fortran/trans-types.c b/gcc/fortran/trans-types.c
index 3ac068c0f11..b64f868dbad 100644
--- a/gcc/fortran/trans-types.c
+++ b/gcc/fortran/trans-types.c
@@ -926,8 +926,8 @@ gfc_get_dtype (tree type)
if (size && !INTEGER_CST_P (size))
{
tmp = build_int_cst (gfc_array_index_type, GFC_DTYPE_SIZE_SHIFT);
- tmp = fold (build2 (LSHIFT_EXPR, gfc_array_index_type, size, tmp));
- dtype = fold (build2 (PLUS_EXPR, gfc_array_index_type, tmp, dtype));
+ tmp = fold_build2 (LSHIFT_EXPR, gfc_array_index_type, size, tmp);
+ dtype = fold_build2 (PLUS_EXPR, gfc_array_index_type, tmp, dtype);
}
/* If we don't know the size we leave it as zero. This should never happen
for anything that is actually used. */
@@ -1160,11 +1160,11 @@ gfc_get_array_type_bounds (tree etype, int dimen, tree * lbound,
if (upper != NULL_TREE && lower != NULL_TREE && stride != NULL_TREE)
{
- tmp = fold (build2 (MINUS_EXPR, gfc_array_index_type, upper, lower));
- tmp = fold (build2 (PLUS_EXPR, gfc_array_index_type, tmp,
- gfc_index_one_node));
+ tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, upper, lower);
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type, tmp,
+ gfc_index_one_node);
stride =
- fold (build2 (MULT_EXPR, gfc_array_index_type, tmp, stride));
+ fold_build2 (MULT_EXPR, gfc_array_index_type, tmp, stride);
/* Check the folding worked. */
gcc_assert (INTEGER_CST_P (stride));
}
diff --git a/gcc/fortran/trans.c b/gcc/fortran/trans.c
index 8c9d342c5c2..70630cbecab 100644
--- a/gcc/fortran/trans.c
+++ b/gcc/fortran/trans.c
@@ -152,7 +152,7 @@ gfc_add_modify_expr (stmtblock_t * pblock, tree lhs, tree rhs)
|| AGGREGATE_TYPE_P (TREE_TYPE (lhs)));
#endif
- tmp = fold (build2_v (MODIFY_EXPR, lhs, rhs));
+ tmp = fold_build2 (MODIFY_EXPR, void_type_node, lhs, rhs);
gfc_add_expr_to_block (pblock, tmp);
}