author     bstarynk <bstarynk@138bc75d-0d04-0410-961f-82ee72b054a4>   2012-09-05 08:56:12 +0000
committer  bstarynk <bstarynk@138bc75d-0d04-0410-961f-82ee72b054a4>   2012-09-05 08:56:12 +0000
commit     6a802d8242d059d55f3cdd558199403c3b8fbb05 (patch)
tree       d128c5095aaa3608ec7c87a3c747396d6d736929
parent     bf4993e7e79a3b597ba502231bf74f5676b08c19 (diff)
download   gcc-6a802d8242d059d55f3cdd558199403c3b8fbb05.tar.gz

2012-09-05  Basile Starynkevitch  <basile@starynkevitch.net>

    MELT branch merged with trunk rev 190965 using svnmerge.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/melt-branch@190968 138bc75d-0d04-0410-961f-82ee72b054a4
-rw-r--r-- ChangeLog | 20
-rw-r--r-- ChangeLog.MELT | 4
-rw-r--r-- MAINTAINERS | 1
-rw-r--r-- config/ChangeLog | 6
-rw-r--r-- config/cloog.m4 | 12
-rw-r--r-- config/isl.m4 | 12
-rwxr-xr-x configure | 85
-rw-r--r-- configure.ac | 92
-rw-r--r-- contrib/ChangeLog | 8
-rw-r--r-- contrib/make_sunver.pl | 2
-rw-r--r-- contrib/testsuite-management/x86_64-unknown-linux-gnu.xfail | 154
-rw-r--r-- gcc/ChangeLog | 284
-rw-r--r-- gcc/DATESTAMP | 2
-rw-r--r-- gcc/alias.c | 10
-rw-r--r-- gcc/basic-block.h | 14
-rw-r--r-- gcc/bb-reorder.c | 151
-rw-r--r-- gcc/common.opt | 3
-rw-r--r-- gcc/config/arm/arm.c | 97
-rw-r--r-- gcc/config/avr/avr.c | 15
-rw-r--r-- gcc/config/s390/s390.c | 36
-rw-r--r-- gcc/config/sh/sh-protos.h | 3
-rw-r--r-- gcc/config/sh/sh.c | 160
-rw-r--r-- gcc/config/sh/sh.h | 4
-rw-r--r-- gcc/config/sh/sh.md | 5
-rw-r--r-- gcc/coverage.c | 12
-rw-r--r-- gcc/cp/ChangeLog | 24
-rw-r--r-- gcc/cp/cp-lang.c | 2
-rw-r--r-- gcc/cp/cp-tree.h | 6
-rw-r--r-- gcc/cp/decl.c | 12
-rw-r--r-- gcc/cp/error.c | 6
-rw-r--r-- gcc/cp/name-lookup.c | 7
-rw-r--r-- gcc/cp/pt.c | 38
-rw-r--r-- gcc/cp/semantics.c | 8
-rw-r--r-- gcc/doc/gimple.texi | 19
-rw-r--r-- gcc/doc/invoke.texi | 512
-rw-r--r-- gcc/except.c | 25
-rw-r--r-- gcc/fold-const.c | 20
-rw-r--r-- gcc/fortran/ChangeLog | 22
-rw-r--r-- gcc/fortran/class.c | 11
-rw-r--r-- gcc/fortran/match.c | 4
-rw-r--r-- gcc/gcc.c | 3
-rw-r--r-- gcc/gcov-dump.c | 22
-rw-r--r-- gcc/gcov-io.c | 259
-rw-r--r-- gcc/gcov-io.h | 41
-rw-r--r-- gcc/gimple-pretty-print.c | 4
-rw-r--r-- gcc/gimple.c | 39
-rw-r--r-- gcc/gimple.h | 8
-rw-r--r-- gcc/gimplify.c | 6
-rw-r--r-- gcc/omp-low.c | 2
-rw-r--r-- gcc/profile.c | 157
-rw-r--r-- gcc/sched-rgn.c | 2
-rw-r--r-- gcc/stmt.c | 15
-rw-r--r-- gcc/testsuite/ChangeLog | 34
-rw-r--r-- gcc/testsuite/g++.dg/cpp0x/lambda/lambda-intname.C | 6
-rw-r--r-- gcc/testsuite/g++.dg/ext/flexary3.C | 10
-rw-r--r-- gcc/testsuite/g++.dg/template/access24.C | 8
-rw-r--r-- gcc/testsuite/g++.dg/template/defarg15.C | 19
-rw-r--r-- gcc/testsuite/gcc.dg/fold-perm.c | 3
-rw-r--r-- gcc/testsuite/gcc.dg/torture/pr54458.c | 20
-rw-r--r-- gcc/testsuite/gcc.target/arm/neon-vext-execute.c | 340
-rw-r--r-- gcc/testsuite/gcc.target/arm/neon-vext.c | 115
-rw-r--r-- gcc/testsuite/gfortran.dg/select_type_29.f03 | 26
-rw-r--r-- gcc/tree-cfg.c | 33
-rw-r--r-- gcc/tree-eh.c | 6
-rw-r--r-- gcc/tree-if-conv.c | 65
-rw-r--r-- gcc/tree-ssa-loop-ch.c | 1
-rw-r--r-- gcc/tree-ssa-pre.c | 144
-rw-r--r-- gcc/tree-ssa-sccvn.c | 2
-rw-r--r-- gcc/tree-ssa-structalias.c | 12
-rw-r--r-- gcc/tree-ssa-threadupdate.c | 10
-rw-r--r-- gcc/tree-switch-conversion.c | 12
-rw-r--r-- gcc/tree-vrp.c | 2
-rw-r--r-- gcc/valtrack.c | 8
-rw-r--r-- gcc/vec.c | 22
-rw-r--r-- gcc/vec.h | 1273
-rw-r--r-- libgcc/ChangeLog | 8
-rw-r--r-- libgcc/libgcov.c | 122
-rw-r--r-- libstdc++-v3/ChangeLog | 46
-rw-r--r-- libstdc++-v3/include/Makefile.am | 4
-rw-r--r-- libstdc++-v3/include/Makefile.in | 4
-rw-r--r-- libstdc++-v3/include/ext/random | 522
-rw-r--r-- libstdc++-v3/include/ext/random.tcc | 314
-rw-r--r-- libstdc++-v3/testsuite/26_numerics/random/beta_distribution/cons/default.cc | 43
-rw-r--r-- libstdc++-v3/testsuite/26_numerics/random/beta_distribution/cons/parms.cc | 43
-rw-r--r-- libstdc++-v3/testsuite/26_numerics/random/beta_distribution/operators/equal.cc | 42
-rw-r--r-- libstdc++-v3/testsuite/26_numerics/random/beta_distribution/operators/inequal.cc | 42
-rw-r--r-- libstdc++-v3/testsuite/26_numerics/random/beta_distribution/operators/serialize.cc | 44
-rw-r--r-- libstdc++-v3/testsuite/26_numerics/random/beta_distribution/requirements/explicit_instantiation/1.cc | 26
-rw-r--r-- libstdc++-v3/testsuite/26_numerics/random/beta_distribution/requirements/typedefs.cc | 34
-rw-r--r-- libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/cons/default.cc | 49
-rw-r--r-- libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/cons/parms.cc | 49
-rw-r--r-- libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/operators/equal.cc | 42
-rw-r--r-- libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/operators/inequal.cc | 42
-rw-r--r-- libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/operators/serialize.cc | 44
-rw-r--r-- libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/requirements/explicit_instantiation/1.cc | 26
-rw-r--r-- libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/requirements/typedefs.cc | 34
96 files changed, 4778 insertions, 1439 deletions
diff --git a/ChangeLog b/ChangeLog
index 6f7e4e5114f..86c63a21668 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,23 @@
+2012-09-04 Jason Merrill <jason@redhat.com>
+
+ * configure.ac: Fix --enable-languages=all.
+
+2012-09-04 Christophe Lyon <christophe.lyon@st.com>
+
+ * MAINTAINERS (Write After Approval): Add myself.
+
+2012-09-03 Richard Guenther <rguenther@suse.de>
+
+ PR bootstrap/54138
+ * configure.ac: Re-organize ISL / CLOOG checks to allow
+ disabling with either --without-isl or --without-cloog.
+ * configure: Regenerated.
+
+2012-09-03 Georg-Johann Lay <avr@gjlay.de>
+
+ * configure.ac (noconfigdirs,target=avr): Add target-libquadmath.
+ * configure: Regenerate.
+
2012-08-27 Ulrich Drepper <drepper@gmail.com>
* MAINTAINERS: Fix my email address.
diff --git a/ChangeLog.MELT b/ChangeLog.MELT
index 69dc7bff973..f66db5991e5 100644
--- a/ChangeLog.MELT
+++ b/ChangeLog.MELT
@@ -1,4 +1,8 @@
+2012-09-05 Basile Starynkevitch <basile@starynkevitch.net>
+
+ MELT branch merged with trunk rev 190965 using svnmerge.
+
2012-09-03 Basile Starynkevitch <basile@starynkevitch.net>
MELT branch merged with trunk rev 190869 using svnmerge.
diff --git a/MAINTAINERS b/MAINTAINERS
index efad63b61ac..ea92e2e0f1f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -439,6 +439,7 @@ Manuel López-Ibáñez manu@gcc.gnu.org
Martin v. Löwis loewis@informatik.hu-berlin.de
H.J. Lu hjl.tools@gmail.com
Xinliang David Li davidxl@google.com
+Christophe Lyon christophe.lyon@st.com
Luis Machado luisgpm@br.ibm.com
Ziga Mahkovec ziga.mahkovec@klika.si
Simon Martin simartin@users.sourceforge.net
diff --git a/config/ChangeLog b/config/ChangeLog
index 4c2c1da563e..90008a5bca9 100644
--- a/config/ChangeLog
+++ b/config/ChangeLog
@@ -1,3 +1,9 @@
+2012-09-03 Richard Guenther <rguenther@suse.de>
+
+ PR bootstrap/54138
+ * config/cloog.m4: Adjust for toplevel reorg.
+ * config/isl.m4: Adjust.
+
2012-08-26 Art Haas <ahaas@impactweather.com>
* cloog.m4 (CLOOG_INIT_FLAGS): Use = instead of == in test.
diff --git a/config/cloog.m4 b/config/cloog.m4
index 5193f4e45d7..a92ef2ea7bd 100644
--- a/config/cloog.m4
+++ b/config/cloog.m4
@@ -22,12 +22,6 @@
# Initialize clooglibs/clooginc according to the user input.
AC_DEFUN([CLOOG_INIT_FLAGS],
[
- AC_ARG_WITH(cloog,
- [AS_HELP_STRING(
- [--with-cloog=PATH],
- [Specify prefix directory for the installed CLooG-PPL package.
- Equivalent to --with-cloog-include=PATH/include
- plus --with-cloog-lib=PATH/lib])])
AC_ARG_WITH([cloog-include],
[AS_HELP_STRING(
[--with-cloog-include=PATH],
@@ -72,11 +66,7 @@ AC_DEFUN([CLOOG_INIT_FLAGS],
fi
clooginc="-DCLOOG_INT_GMP ${clooginc}"
- clooglibs="${clooglibs} -lcloog-isl ${isllibs}"
-
- dnl Flags needed for CLOOG
- AC_SUBST(clooglibs)
- AC_SUBST(clooginc)
+ clooglibs="${clooglibs} -lcloog-isl ${isllibs} -lisl"
]
)
diff --git a/config/isl.m4 b/config/isl.m4
index 4f397246e2e..ba3fa1b7f83 100644
--- a/config/isl.m4
+++ b/config/isl.m4
@@ -23,12 +23,6 @@
# Initialize isllibs/islinc according to the user input.
AC_DEFUN([ISL_INIT_FLAGS],
[
- AC_ARG_WITH(isl,
- [AS_HELP_STRING(
- [--with-isl=PATH],
- [Specify prefix directory for the installed ISL package.
- Equivalent to --with-isl-include=PATH/include
- plus --with-isl-lib=PATH/lib])])
AC_ARG_WITH([isl-include],
[AS_HELP_STRING(
[--with-isl-include=PATH],
@@ -73,12 +67,6 @@ AC_DEFUN([ISL_INIT_FLAGS],
islinc='-I$$r/$(HOST_SUBDIR)/isl/include -I$$s/isl/include'
ENABLE_ISL_CHECK=no
fi
-
- isllibs="${isllibs} -lisl"
-
- dnl Flags needed for ISL
- AC_SUBST(isllibs)
- AC_SUBST(islinc)
]
)
diff --git a/configure b/configure
index 60e269e00e5..cd06e4ef0d6 100755
--- a/configure
+++ b/configure
@@ -646,7 +646,6 @@ extra_host_libiberty_configure_flags
clooginc
clooglibs
islinc
-isllibs
poststage1_ldflags
poststage1_libs
stage1_libs
@@ -768,11 +767,11 @@ with_stage1_ldflags
with_stage1_libs
with_boot_libs
with_boot_ldflags
+with_cloog
with_isl
with_isl_include
with_isl_lib
enable_isl_version_check
-with_cloog
with_cloog_include
with_cloog_lib
enable_cloog_version_check
@@ -1521,16 +1520,16 @@ Optional Packages:
--with-boot-libs=LIBS libraries for stage2 and later
--with-boot-ldflags=FLAGS
linker flags for stage2 and later
+ --with-cloog=PATH Specify prefix directory for the installed CLooG-ISL
+ package. Equivalent to
+ --with-cloog-include=PATH/include plus
+ --with-cloog-lib=PATH/lib
--with-isl=PATH Specify prefix directory for the installed ISL
package. Equivalent to
--with-isl-include=PATH/include plus
--with-isl-lib=PATH/lib
--with-isl-include=PATH Specify directory for installed ISL include files
--with-isl-lib=PATH Specify the directory for the installed ISL library
- --with-cloog=PATH Specify prefix directory for the installed CLooG-PPL
- package. Equivalent to
- --with-cloog-include=PATH/include plus
- --with-cloog-lib=PATH/lib
--with-cloog-include=PATH
Specify directory for installed CLooG include files
--with-cloog-lib=PATH Specify the directory for the installed CLooG
@@ -3153,6 +3152,13 @@ case "${target}" in
;;
esac
+# Disable libquadmath for some systems.
+case "${target}" in
+ avr-*-*)
+ noconfigdirs="$noconfigdirs target-libquadmath"
+ ;;
+esac
+
# Disable libstdc++-v3 for some systems.
case "${target}" in
*-*-vxworks*)
@@ -5602,7 +5608,15 @@ fi
-# Check for ISL
+# GCC GRAPHITE dependences, ISL and CLOOG which in turn requires ISL.
+# Basic setup is inlined here, actual checks are in config/cloog.m4 and
+# config/isl.m4
+
+
+# Check whether --with-cloog was given.
+if test "${with_cloog+set}" = set; then :
+ withval=$with_cloog;
+fi
# Check whether --with-isl was given.
@@ -5611,6 +5625,13 @@ if test "${with_isl+set}" = set; then :
fi
+# Treat either --without-cloog or --without-isl as a request to disable
+# GRAPHITE support and skip all following checks.
+if test "x$with_isl" != "xno" &&
+ test "x$with_cloog" != "xno"; then
+ # Check for ISL
+
+
# Check whether --with-isl-include was given.
if test "${with_isl_include+set}" = set; then :
withval=$with_isl_include;
@@ -5657,13 +5678,7 @@ fi
ENABLE_ISL_CHECK=no
fi
- isllibs="${isllibs} -lisl"
-
-
-
-
-if test "x$with_isl" != "xno"; then
if test "${ENABLE_ISL_CHECK}" = yes ; then
_isl_saved_CFLAGS=$CFLAGS
@@ -5720,7 +5735,6 @@ $as_echo "$gcc_cv_isl" >&6; }
-
if test "x${with_isl}" = xno; then
graphite_requested=no
elif test "x${with_isl}" != x \
@@ -5746,15 +5760,9 @@ $as_echo "$gcc_cv_isl" >&6; }
fi
-fi
-
-# Check for CLOOG
-
-# Check whether --with-cloog was given.
-if test "${with_cloog+set}" = set; then :
- withval=$with_cloog;
-fi
+ if test "x$gcc_cv_isl" != "xno"; then
+ # Check for CLOOG
# Check whether --with-cloog-include was given.
@@ -5803,17 +5811,10 @@ fi
fi
clooginc="-DCLOOG_INT_GMP ${clooginc}"
- clooglibs="${clooglibs} -lcloog-isl ${isllibs}"
-
-
+ clooglibs="${clooglibs} -lcloog-isl ${isllibs} -lisl"
-if test "x$isllibs" = x && test "x$islinc" = x; then
- clooglibs=
- clooginc=
-elif test "x$with_cloog" != "xno"; then
-
if test "${ENABLE_CLOOG_CHECK}" = yes ; then
@@ -5884,18 +5885,30 @@ $as_echo "$gcc_cv_cloog" >&6; }
&& test "x${clooglibs}" = x \
&& test "x${clooginc}" = x ; then
- as_fn_error "Unable to find a usable CLooG. See config.log for details." "$LINENO" 5
+ as_fn_error "Unable to find a usable CLooG. See config.log for details." "$LINENO" 5
fi
+ fi
fi
# If either the ISL or the CLooG check failed, disable builds of in-tree
# variants of both
-if test "x$clooglibs" = x && test "x$clooginc" = x; then
+if test "x$with_isl" = xno ||
+ test "x$with_cloog" = xno ||
+ test "x$gcc_cv_cloog" = xno ||
+ test "x$gcc_cv_isl" = xno; then
noconfigdirs="$noconfigdirs cloog isl"
+ islinc=
+ clooginc=
+ clooglibs=
fi
+
+
+
+
+
# Check for LTO support.
# Check whether --enable-lto was given.
if test "${enable_lto+set}" = set; then :
@@ -6099,6 +6112,7 @@ if test -d ${srcdir}/gcc; then
boot_language=yes
fi
+ add_this_lang=no
case ,${enable_languages}, in
*,${language},*)
# Language was explicitly selected; include it
@@ -6109,10 +6123,9 @@ if test -d ${srcdir}/gcc; then
;;
*,all,*)
# 'all' was selected, select it if it is a default language
- add_this_lang=${build_by_default}
- ;;
- *)
- add_this_lang=no
+ if test "$language" != "c"; then
+ add_this_lang=${build_by_default}
+ fi
;;
esac
diff --git a/configure.ac b/configure.ac
index 27692b45bf8..9bee624ec1c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -544,6 +544,13 @@ case "${target}" in
;;
esac
+# Disable libquadmath for some systems.
+case "${target}" in
+ avr-*-*)
+ noconfigdirs="$noconfigdirs target-libquadmath"
+ ;;
+esac
+
# Disable libstdc++-v3 for some systems.
case "${target}" in
*-*-vxworks*)
@@ -1506,44 +1513,71 @@ AC_ARG_WITH(boot-ldflags,
fi])
AC_SUBST(poststage1_ldflags)
-# Check for ISL
-dnl Provide configure switches and initialize islinc & isllibs
-dnl with user input.
-ISL_INIT_FLAGS
-if test "x$with_isl" != "xno"; then
+# GCC GRAPHITE dependences, ISL and CLOOG which in turn requires ISL.
+# Basic setup is inlined here, actual checks are in config/cloog.m4 and
+# config/isl.m4
+
+AC_ARG_WITH(cloog,
+ [AS_HELP_STRING(
+ [--with-cloog=PATH],
+ [Specify prefix directory for the installed CLooG-ISL package.
+ Equivalent to --with-cloog-include=PATH/include
+ plus --with-cloog-lib=PATH/lib])])
+AC_ARG_WITH(isl,
+ [AS_HELP_STRING(
+ [--with-isl=PATH],
+ [Specify prefix directory for the installed ISL package.
+ Equivalent to --with-isl-include=PATH/include
+ plus --with-isl-lib=PATH/lib])])
+
+# Treat either --without-cloog or --without-isl as a request to disable
+# GRAPHITE support and skip all following checks.
+if test "x$with_isl" != "xno" &&
+ test "x$with_cloog" != "xno"; then
+ # Check for ISL
+ dnl Provide configure switches and initialize islinc & isllibs
+ dnl with user input.
+ ISL_INIT_FLAGS
dnl The minimal version of ISL required for Graphite.
ISL_CHECK_VERSION(0,10)
-
dnl Only execute fail-action, if ISL has been requested.
ISL_IF_FAILED([
AC_MSG_ERROR([Unable to find a usable ISL. See config.log for details.])])
-fi
-# Check for CLOOG
-dnl Provide configure switches and initialize clooginc & clooglibs
-dnl with user input.
-CLOOG_INIT_FLAGS
-if test "x$isllibs" = x && test "x$islinc" = x; then
- clooglibs=
- clooginc=
-elif test "x$with_cloog" != "xno"; then
- dnl The minimal version of CLooG required for Graphite.
- dnl
- dnl If we use CLooG-Legacy, the provided version information is
- dnl ignored.
- CLOOG_CHECK_VERSION(0,17,0)
-
- dnl Only execute fail-action, if CLooG has been requested.
- CLOOG_IF_FAILED([
- AC_MSG_ERROR([Unable to find a usable CLooG. See config.log for details.])])
+ if test "x$gcc_cv_isl" != "xno"; then
+ # Check for CLOOG
+ dnl Provide configure switches and initialize clooginc & clooglibs
+ dnl with user input.
+ CLOOG_INIT_FLAGS
+ dnl The minimal version of CLooG required for Graphite.
+ dnl
+ dnl If we use CLooG-Legacy, the provided version information is
+ dnl ignored.
+ CLOOG_CHECK_VERSION(0,17,0)
+
+ dnl Only execute fail-action, if CLooG has been requested.
+ CLOOG_IF_FAILED([
+ AC_MSG_ERROR([Unable to find a usable CLooG. See config.log for details.])])
+ fi
fi
# If either the ISL or the CLooG check failed, disable builds of in-tree
# variants of both
-if test "x$clooglibs" = x && test "x$clooginc" = x; then
+if test "x$with_isl" = xno ||
+ test "x$with_cloog" = xno ||
+ test "x$gcc_cv_cloog" = xno ||
+ test "x$gcc_cv_isl" = xno; then
noconfigdirs="$noconfigdirs cloog isl"
+ islinc=
+ clooginc=
+ clooglibs=
fi
+AC_SUBST(islinc)
+AC_SUBST(clooglibs)
+AC_SUBST(clooginc)
+
+
# Check for LTO support.
AC_ARG_ENABLE(lto,
[AS_HELP_STRING([--enable-lto], [enable link time optimization support])],
@@ -1724,6 +1758,7 @@ if test -d ${srcdir}/gcc; then
boot_language=yes
fi
+ add_this_lang=no
case ,${enable_languages}, in
*,${language},*)
# Language was explicitly selected; include it
@@ -1734,10 +1769,9 @@ if test -d ${srcdir}/gcc; then
;;
*,all,*)
# 'all' was selected, select it if it is a default language
- add_this_lang=${build_by_default}
- ;;
- *)
- add_this_lang=no
+ if test "$language" != "c"; then
+ add_this_lang=${build_by_default}
+ fi
;;
esac
diff --git a/contrib/ChangeLog b/contrib/ChangeLog
index bb876380ec6..9173c6c7228 100644
--- a/contrib/ChangeLog
+++ b/contrib/ChangeLog
@@ -1,3 +1,11 @@
+2012-09-04 Eric Botcazou <ebotcazou@adacore.com>
+
+ * make_sunver.pl: Add missing newline at the end of extern "C++" block.
+
+2012-08-24 Diego Novillo <dnovillo@google.com>
+
+ * testsuite-management/x86_64-unknown-linux-gnu.xfail: Update.
+
2012-08-14 Diego Novillo <dnovillo@google.com>
* testsuite-management/x86_64-unknown-linux-gnu.xfail: New.
diff --git a/contrib/make_sunver.pl b/contrib/make_sunver.pl
index afdf973f5da..db0fe21ef43 100644
--- a/contrib/make_sunver.pl
+++ b/contrib/make_sunver.pl
@@ -185,7 +185,7 @@ while (<F>) {
$glob = 'glob';
if ($in_extern) {
$in_extern--;
- print "$1##$2";
+ print "$1##$2\n";
} else {
print;
}
diff --git a/contrib/testsuite-management/x86_64-unknown-linux-gnu.xfail b/contrib/testsuite-management/x86_64-unknown-linux-gnu.xfail
index c823fb20fbb..2315738a46b 100644
--- a/contrib/testsuite-management/x86_64-unknown-linux-gnu.xfail
+++ b/contrib/testsuite-management/x86_64-unknown-linux-gnu.xfail
@@ -1,78 +1,126 @@
+XPASS: gcc.dg/Wstrict-overflow-18.c (test for bogus messages, line 20)
FAIL: gcc.dg/attr-weakref-1.c (test for excess errors)
-FAIL: gcc.dg/torture/pr51106-2.c -O0 (internal compiler error)
-FAIL: gcc.dg/torture/pr51106-2.c -O0 (test for excess errors)
-FAIL: gcc.dg/torture/pr51106-2.c -O1 (internal compiler error)
-FAIL: gcc.dg/torture/pr51106-2.c -O1 (test for excess errors)
-FAIL: gcc.dg/torture/pr51106-2.c -O2 (internal compiler error)
-FAIL: gcc.dg/torture/pr51106-2.c -O2 (test for excess errors)
-FAIL: gcc.dg/torture/pr51106-2.c -O2 -flto -fno-use-linker-plugin -flto-partition=none (internal compiler error)
-FAIL: gcc.dg/torture/pr51106-2.c -O2 -flto -fno-use-linker-plugin -flto-partition=none (test for excess errors)
-FAIL: gcc.dg/torture/pr51106-2.c -O3 -fomit-frame-pointer (internal compiler error)
-FAIL: gcc.dg/torture/pr51106-2.c -O3 -fomit-frame-pointer (test for excess errors)
-FAIL: gcc.dg/torture/pr51106-2.c -O3 -g (internal compiler error)
-FAIL: gcc.dg/torture/pr51106-2.c -O3 -g (test for excess errors)
-FAIL: gcc.dg/torture/pr51106-2.c -Os (internal compiler error)
-FAIL: gcc.dg/torture/pr51106-2.c -Os (test for excess errors)
-FAIL: gfortran.dg/lto/pr45586 f_lto_pr45586_0.o-f_lto_pr45586_0.o link, -O0 -flto -flto-partition=1to1 -fno-use-linker-plugin (internal compiler error)
-FAIL: gfortran.dg/lto/pr45586 f_lto_pr45586_0.o-f_lto_pr45586_0.o link, -O0 -flto -flto-partition=none -fuse-linker-plugin (internal compiler error)
-FAIL: gfortran.dg/lto/pr45586 f_lto_pr45586_0.o-f_lto_pr45586_0.o link, -O0 -flto -fuse-linker-plugin -fno-fat-lto-objects (internal compiler error)
-FAIL: gfortran.dg/lto/pr45586-2 f_lto_pr45586-2_0.o-f_lto_pr45586-2_0.o link, -O0 -flto -flto-partition=1to1 -fno-use-linker-plugin (internal compiler error)
-FAIL: gfortran.dg/lto/pr45586-2 f_lto_pr45586-2_0.o-f_lto_pr45586-2_0.o link, -O0 -flto -flto-partition=none -fuse-linker-plugin (internal compiler error)
-FAIL: gfortran.dg/lto/pr45586-2 f_lto_pr45586-2_0.o-f_lto_pr45586-2_0.o link, -O0 -flto -fuse-linker-plugin -fno-fat-lto-objects (internal compiler error)
-FAIL: gnat.dg/array11.adb (test for warnings, line 12)
-FAIL: gnat.dg/object_overflow.adb (test for warnings, line 8)
-FAIL: libmudflap.c++/pass55-frag.cxx ( -O) execution test
-FAIL: libmudflap.c++/pass55-frag.cxx (-O2) execution test
-FAIL: libmudflap.c++/pass55-frag.cxx (-O3) execution test
UNRESOLVED: gcc.dg/attr-weakref-1.c compilation failed to produce executable
-UNRESOLVED: gfortran.dg/lto/pr45586 f_lto_pr45586_0.o-f_lto_pr45586_0.o execute -O0 -flto -flto-partition=1to1 -fno-use-linker-plugin
-UNRESOLVED: gfortran.dg/lto/pr45586 f_lto_pr45586_0.o-f_lto_pr45586_0.o execute -O0 -flto -flto-partition=none -fuse-linker-plugin
-UNRESOLVED: gfortran.dg/lto/pr45586 f_lto_pr45586_0.o-f_lto_pr45586_0.o execute -O0 -flto -fuse-linker-plugin -fno-fat-lto-objects
-UNRESOLVED: gfortran.dg/lto/pr45586-2 f_lto_pr45586-2_0.o-f_lto_pr45586-2_0.o execute -O0 -flto -flto-partition=1to1 -fno-use-linker-plugin
-UNRESOLVED: gfortran.dg/lto/pr45586-2 f_lto_pr45586-2_0.o-f_lto_pr45586-2_0.o execute -O0 -flto -flto-partition=none -fuse-linker-plugin
-UNRESOLVED: gfortran.dg/lto/pr45586-2 f_lto_pr45586-2_0.o-f_lto_pr45586-2_0.o execute -O0 -flto -fuse-linker-plugin -fno-fat-lto-objects
-XPASS: gcc.dg/Wstrict-overflow-18.c (test for bogus messages, line 20)
-XPASS: gcc.dg/guality/example.c -O0 execution test
-XPASS: gcc.dg/guality/example.c -O1 execution test
-XPASS: gcc.dg/guality/example.c -O2 execution test
XPASS: gcc.dg/guality/example.c -O2 -flto -fno-use-linker-plugin -flto-partition=none execution test
-XPASS: gcc.dg/guality/example.c -Os execution test
-XPASS: gcc.dg/guality/guality.c -O0 execution test
-XPASS: gcc.dg/guality/guality.c -O1 execution test
+XPASS: gcc.dg/guality/example.c -O2 execution test
+XPASS: gcc.dg/guality/example.c -O0 execution test
XPASS: gcc.dg/guality/guality.c -O2 execution test
-XPASS: gcc.dg/guality/guality.c -O2 -flto -fno-use-linker-plugin -flto-partition=none execution test
-XPASS: gcc.dg/guality/guality.c -O2 -flto -fuse-linker-plugin -fno-fat-lto-objects execution test
XPASS: gcc.dg/guality/guality.c -O3 -fomit-frame-pointer execution test
-XPASS: gcc.dg/guality/guality.c -O3 -g execution test
+XPASS: gcc.dg/guality/guality.c -O1 execution test
+XPASS: gcc.dg/guality/guality.c -O0 execution test
XPASS: gcc.dg/guality/guality.c -Os execution test
+XPASS: gcc.dg/guality/guality.c -O3 -g execution test
+XPASS: gcc.dg/guality/guality.c -O2 -flto -fno-use-linker-plugin -flto-partition=none execution test
+XPASS: gcc.dg/guality/guality.c -O2 -flto -fuse-linker-plugin -fno-fat-lto-objects execution test
XPASS: gcc.dg/guality/inline-params.c -O2 execution test
+XPASS: gcc.dg/guality/inline-params.c -O3 -g execution test
XPASS: gcc.dg/guality/inline-params.c -O2 -flto -fno-use-linker-plugin -flto-partition=none execution test
XPASS: gcc.dg/guality/inline-params.c -O2 -flto -fuse-linker-plugin -fno-fat-lto-objects execution test
XPASS: gcc.dg/guality/inline-params.c -O3 -fomit-frame-pointer execution test
-XPASS: gcc.dg/guality/inline-params.c -O3 -g execution test
XPASS: gcc.dg/guality/inline-params.c -Os execution test
-XPASS: gcc.dg/guality/pr41447-1.c -O0 execution test
XPASS: gcc.dg/guality/pr41447-1.c -O1 execution test
+XPASS: gcc.dg/guality/pr41447-1.c -O3 -fomit-frame-pointer execution test
+XPASS: gcc.dg/guality/pr41447-1.c -Os execution test
XPASS: gcc.dg/guality/pr41447-1.c -O2 execution test
+XPASS: gcc.dg/guality/pr41447-1.c -O3 -g execution test
XPASS: gcc.dg/guality/pr41447-1.c -O2 -flto -fno-use-linker-plugin -flto-partition=none execution test
+XPASS: gcc.dg/guality/pr41447-1.c -O0 execution test
XPASS: gcc.dg/guality/pr41447-1.c -O2 -flto -fuse-linker-plugin -fno-fat-lto-objects execution test
-XPASS: gcc.dg/guality/pr41447-1.c -O3 -fomit-frame-pointer execution test
-XPASS: gcc.dg/guality/pr41447-1.c -O3 -g execution test
-XPASS: gcc.dg/guality/pr41447-1.c -Os execution test
-XPASS: gcc.dg/guality/pr41616-1.c -O0 execution test
XPASS: gcc.dg/guality/pr41616-1.c -O1 execution test
+XPASS: gcc.dg/guality/pr41616-1.c -O3 -fomit-frame-pointer execution test
+XPASS: gcc.dg/guality/pr41616-1.c -O0 execution test
+XPASS: gcc.dg/guality/pr41616-1.c -Os execution test
XPASS: gcc.dg/guality/pr41616-1.c -O2 execution test
XPASS: gcc.dg/guality/pr41616-1.c -O2 -flto -fno-use-linker-plugin -flto-partition=none execution test
-XPASS: gcc.dg/guality/pr41616-1.c -O3 -fomit-frame-pointer execution test
XPASS: gcc.dg/guality/pr41616-1.c -O3 -g execution test
-XPASS: gcc.dg/guality/pr41616-1.c -Os execution test
XPASS: gcc.dg/inline_3.c (test for excess errors)
XPASS: gcc.dg/inline_4.c (test for excess errors)
-XPASS: gcc.dg/unroll_2.c (test for excess errors)
-XPASS: gcc.dg/unroll_3.c (test for excess errors)
-XPASS: gcc.dg/unroll_4.c (test for excess errors)
FAIL: gcc.dg/pr52558-1.c scan-tree-dump-times lim1 "MEM count_lsm.. count_lsm_flag" 1
FAIL: gcc.dg/pr52558-2.c scan-tree-dump-times lim1 "MEM.*g_2_lsm_flag" 1
FAIL: gcc.dg/tm/reg-promotion.c scan-tree-dump-times lim1 "MEM count_lsm.. count_lsm_flag" 1
-FAIL: go.test/test/stack.go execution, -O2 -g
+FAIL: gcc.dg/torture/pr51106-2.c -O3 -g (internal compiler error)
+FAIL: gcc.dg/torture/pr51106-2.c -O1 (test for excess errors)
+FAIL: gcc.dg/torture/pr51106-2.c -Os (internal compiler error)
+FAIL: gcc.dg/torture/pr51106-2.c -O3 -fomit-frame-pointer (internal compiler error)
+FAIL: gcc.dg/torture/pr51106-2.c -O2 -flto -fno-use-linker-plugin -flto-partition=none (internal compiler error)
+FAIL: gcc.dg/torture/pr51106-2.c -Os (test for excess errors)
+FAIL: gcc.dg/torture/pr51106-2.c -O2 -flto -fno-use-linker-plugin -flto-partition=none (test for excess errors)
+FAIL: gcc.dg/torture/pr51106-2.c -O3 -g (test for excess errors)
+FAIL: gcc.dg/torture/pr51106-2.c -O0 (internal compiler error)
+FAIL: gcc.dg/torture/pr51106-2.c -O1 (internal compiler error)
+FAIL: gcc.dg/torture/pr51106-2.c -O0 (test for excess errors)
+FAIL: gcc.dg/torture/pr51106-2.c -O2 (test for excess errors)
+FAIL: gcc.dg/torture/pr51106-2.c -O2 (internal compiler error)
+FAIL: gcc.dg/torture/pr51106-2.c -O3 -fomit-frame-pointer (test for excess errors)
+XPASS: gcc.dg/unroll_2.c (test for excess errors)
+XPASS: gcc.dg/unroll_3.c (test for excess errors)
+XPASS: gcc.dg/unroll_4.c (test for excess errors)
FAIL: gcc.target/i386/pad-10.c scan-assembler-not nop
+FAIL: gfortran.dg/lto/pr45586 f_lto_pr45586_0.o-f_lto_pr45586_0.o link, -O0 -flto -flto-partition=none -fuse-linker-plugin (internal compiler error)
+FAIL: gfortran.dg/lto/pr45586 f_lto_pr45586_0.o-f_lto_pr45586_0.o link, -O0 -flto -fuse-linker-plugin -fno-fat-lto-objects (internal compiler error)
+FAIL: gfortran.dg/lto/pr45586 f_lto_pr45586_0.o-f_lto_pr45586_0.o link, -O0 -flto -flto-partition=1to1 -fno-use-linker-plugin (internal compiler error)
+UNRESOLVED: gfortran.dg/lto/pr45586 f_lto_pr45586_0.o-f_lto_pr45586_0.o execute -O0 -flto -flto-partition=1to1 -fno-use-linker-plugin
+UNRESOLVED: gfortran.dg/lto/pr45586 f_lto_pr45586_0.o-f_lto_pr45586_0.o execute -O0 -flto -fuse-linker-plugin -fno-fat-lto-objects
+UNRESOLVED: gfortran.dg/lto/pr45586 f_lto_pr45586_0.o-f_lto_pr45586_0.o execute -O0 -flto -flto-partition=none -fuse-linker-plugin
+UNRESOLVED: gfortran.dg/lto/pr45586-2 f_lto_pr45586-2_0.o-f_lto_pr45586-2_0.o execute -O0 -flto -fuse-linker-plugin -fno-fat-lto-objects
+UNRESOLVED: gfortran.dg/lto/pr45586-2 f_lto_pr45586-2_0.o-f_lto_pr45586-2_0.o execute -O0 -flto -flto-partition=1to1 -fno-use-linker-plugin
+FAIL: gfortran.dg/lto/pr45586-2 f_lto_pr45586-2_0.o-f_lto_pr45586-2_0.o link, -O0 -flto -flto-partition=none -fuse-linker-plugin (internal compiler error)
+UNRESOLVED: gfortran.dg/lto/pr45586-2 f_lto_pr45586-2_0.o-f_lto_pr45586-2_0.o execute -O0 -flto -flto-partition=none -fuse-linker-plugin
+FAIL: gfortran.dg/lto/pr45586-2 f_lto_pr45586-2_0.o-f_lto_pr45586-2_0.o link, -O0 -flto -fuse-linker-plugin -fno-fat-lto-objects (internal compiler error)
+FAIL: gfortran.dg/lto/pr45586-2 f_lto_pr45586-2_0.o-f_lto_pr45586-2_0.o link, -O0 -flto -flto-partition=1to1 -fno-use-linker-plugin (internal compiler error)
+FAIL: gfortran.dg/realloc_on_assign_14.f90 -O (internal compiler error)
+FAIL: gfortran.dg/realloc_on_assign_14.f90 -O (test for excess errors)
+FAIL: gfortran.dg/realloc_on_assign_15.f90 -O0 (test for excess errors)
+FAIL: gfortran.dg/realloc_on_assign_15.f90 -O3 -g (test for excess errors)
+FAIL: gfortran.dg/realloc_on_assign_15.f90 -O0 (internal compiler error)
+FAIL: gfortran.dg/realloc_on_assign_15.f90 -O1 (test for excess errors)
+FAIL: gfortran.dg/realloc_on_assign_15.f90 -O3 -fomit-frame-pointer -funroll-all-loops -finline-functions (test for excess errors)
+UNRESOLVED: gfortran.dg/realloc_on_assign_15.f90 -O3 -fomit-frame-pointer compilation failed to produce executable
+FAIL: gfortran.dg/realloc_on_assign_15.f90 -O3 -fomit-frame-pointer (test for excess errors)
+FAIL: gfortran.dg/realloc_on_assign_15.f90 -O2 (test for excess errors)
+FAIL: gfortran.dg/realloc_on_assign_15.f90 -O3 -fomit-frame-pointer -funroll-loops (internal compiler error)
+FAIL: gfortran.dg/realloc_on_assign_15.f90 -O1 (internal compiler error)
+FAIL: gfortran.dg/realloc_on_assign_15.f90 -O2 (internal compiler error)
+UNRESOLVED: gfortran.dg/realloc_on_assign_15.f90 -Os compilation failed to produce executable
+UNRESOLVED: gfortran.dg/realloc_on_assign_15.f90 -O3 -fomit-frame-pointer -funroll-all-loops -finline-functions compilation failed to produce executable
+FAIL: gfortran.dg/realloc_on_assign_15.f90 -Os (internal compiler error)
+UNRESOLVED: gfortran.dg/realloc_on_assign_15.f90 -O3 -fomit-frame-pointer -funroll-loops compilation failed to produce executable
+UNRESOLVED: gfortran.dg/realloc_on_assign_15.f90 -O1 compilation failed to produce executable
+FAIL: gfortran.dg/realloc_on_assign_15.f90 -O3 -g (internal compiler error)
+UNRESOLVED: gfortran.dg/realloc_on_assign_15.f90 -O0 compilation failed to produce executable
+UNRESOLVED: gfortran.dg/realloc_on_assign_15.f90 -O3 -g compilation failed to produce executable
+FAIL: gfortran.dg/realloc_on_assign_15.f90 -O3 -fomit-frame-pointer -funroll-all-loops -finline-functions (internal compiler error)
+UNRESOLVED: gfortran.dg/realloc_on_assign_15.f90 -O2 compilation failed to produce executable
+FAIL: gfortran.dg/realloc_on_assign_15.f90 -O3 -fomit-frame-pointer (internal compiler error)
+FAIL: gfortran.dg/realloc_on_assign_15.f90 -O3 -fomit-frame-pointer -funroll-loops (test for excess errors)
+FAIL: gfortran.dg/realloc_on_assign_15.f90 -Os (test for excess errors)
+FAIL: gfortran.dg/realloc_on_assign_2.f03 -O3 -g (test for excess errors)
+FAIL: gfortran.dg/realloc_on_assign_2.f03 -O2 (test for excess errors)
+FAIL: gfortran.dg/realloc_on_assign_2.f03 -O0 (internal compiler error)
+FAIL: gfortran.dg/realloc_on_assign_2.f03 -O0 (test for excess errors)
+FAIL: gfortran.dg/realloc_on_assign_2.f03 -O3 -fomit-frame-pointer (internal compiler error)
+FAIL: gfortran.dg/realloc_on_assign_2.f03 -O3 -g (internal compiler error)
+FAIL: gfortran.dg/realloc_on_assign_2.f03 -O3 -fomit-frame-pointer -funroll-all-loops -finline-functions (internal compiler error)
+UNRESOLVED: gfortran.dg/realloc_on_assign_2.f03 -Os compilation failed to produce executable
+FAIL: gfortran.dg/realloc_on_assign_2.f03 -O2 (internal compiler error)
+UNRESOLVED: gfortran.dg/realloc_on_assign_2.f03 -O3 -fomit-frame-pointer compilation failed to produce executable
+FAIL: gfortran.dg/realloc_on_assign_2.f03 -O1 (internal compiler error)
+FAIL: gfortran.dg/realloc_on_assign_2.f03 -O3 -fomit-frame-pointer -funroll-all-loops -finline-functions (test for excess errors)
+UNRESOLVED: gfortran.dg/realloc_on_assign_2.f03 -O0 compilation failed to produce executable
+UNRESOLVED: gfortran.dg/realloc_on_assign_2.f03 -O2 compilation failed to produce executable
+UNRESOLVED: gfortran.dg/realloc_on_assign_2.f03 -O3 -g compilation failed to produce executable
+FAIL: gfortran.dg/realloc_on_assign_2.f03 -Os (internal compiler error)
+FAIL: gfortran.dg/realloc_on_assign_2.f03 -O3 -fomit-frame-pointer (test for excess errors)
+FAIL: gfortran.dg/realloc_on_assign_2.f03 -O1 (test for excess errors)
+FAIL: gfortran.dg/realloc_on_assign_2.f03 -O3 -fomit-frame-pointer -funroll-loops (test for excess errors)
+UNRESOLVED: gfortran.dg/realloc_on_assign_2.f03 -O3 -fomit-frame-pointer -funroll-loops compilation failed to produce executable
+FAIL: gfortran.dg/realloc_on_assign_2.f03 -Os (test for excess errors)
+FAIL: gfortran.dg/realloc_on_assign_2.f03 -O3 -fomit-frame-pointer -funroll-loops (internal compiler error)
+UNRESOLVED: gfortran.dg/realloc_on_assign_2.f03 -O3 -fomit-frame-pointer -funroll-all-loops -finline-functions compilation failed to produce executable
+UNRESOLVED: gfortran.dg/realloc_on_assign_2.f03 -O1 compilation failed to produce executable
+FAIL: gnat.dg/array11.adb (test for warnings, line 12)
+FAIL: gnat.dg/object_overflow.adb (test for warnings, line 8)
+FAIL: libmudflap.c++/pass55-frag.cxx (-O2) execution test
+FAIL: libmudflap.c++/pass55-frag.cxx ( -O) execution test
+FAIL: libmudflap.c++/pass55-frag.cxx (-O3) execution test
+FAIL: log
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 7e188999df7..38991a1351c 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,287 @@
+2012-09-04 Teresa Johnson <tejohnson@google.com>
+
+ * gcov-io.c (gcov_write_summary): Write out non-zero histogram
+ entries to function summary along with an occupancy bit vector.
+ (gcov_read_summary): Read in the histogram entries.
+ (gcov_histo_index): New function.
+ (void gcov_histogram_merge): Ditto.
+ * gcov-io.h (gcov_type_unsigned): New type.
+ (struct gcov_bucket_type): Ditto.
+ (struct gcov_ctr_summary): Include histogram.
+ (GCOV_TAG_SUMMARY_LENGTH): Update to include histogram entries.
+ (GCOV_HISTOGRAM_SIZE): New macro.
+ (GCOV_HISTOGRAM_BITVECTOR_SIZE): Ditto.
+ * profile.c (NUM_GCOV_WORKING_SETS): Ditto.
+ (gcov_working_sets): New global variable.
+ (compute_working_sets): New function.
+ (find_working_set): Ditto.
+ (get_exec_counts): Invoke compute_working_sets.
+ * coverage.c (read_counts_file): Merge histograms, and
+ fix bug with accessing summary info for non-summable counters.
+ * basic-block.h (gcov_type_unsigned): New type.
+ (struct gcov_working_set_info): Ditto.
+ (find_working_set): Declare.
+ * gcov-dump.c (tag_summary): Dump out histogram.
+
+2012-09-04 Diego Novillo <dnovillo@google.com>
+
+ PR bootstrap/54484
+ * vec.h (vec_t::embedded_init): Move default argument value
+ to function declaration.
+
+2012-09-04 Eric Botcazou <ebotcazou@adacore.com>
+
+ * bb-reorder.c (gate_handle_reorder_blocks): Move around.
+ (rest_of_handle_reorder_blocks): Likewise.
+ (pass_reorder_blocks): Likewise.
+ (gate_handle_partition_blocks): Likewise.
+
+2012-09-04 Eric Botcazou <ebotcazou@adacore.com>
+
+ PR rtl-optimization/54456
+ * sched-rgn.c (gate_handle_sched): Return 1 only if optimize > 0.
+
+2012-09-04 Diego Novillo <dnovillo@google.com>
+
+ PR bootstrap/54478
+ * vec.h (vec_t::alloc): Remove explicit type specification
+ in call to reserve.
+ (vec_t::copy): Likewise.
+ (vec_t::reserve): Likewise.
+ (vec_t::reserve_exact): Likewise.
+ (vec_t::safe_splice): Likewise.
+ (vec_t::safe_push): Likewise.
+ (vec_t::safe_grow): Likewise.
+ (vec_t::safe_grow_cleared): Likewise.
+ (vec_t::safe_insert): Likewise.
+
+2012-09-04 Richard Henderson <rth@redhat.com>
+
+ * alias.c (read_dependence): Return true for ALIAS_SET_MEMORY_BARRIER.
+
+2012-09-04 Richard Guenther <rguenther@suse.de>
+
+ * tree-ssa-pre.c (value_expressions): Make it a vector of
+ bitmaps containing expression IDs.
+ (add_to_value): Adjust.
+ (sorted_array_from_bitmap_set): Likewise.
+ (bitmap_set_replace_value): Likewise.
+ (print_value_expressions): Likewise.
+ (get_constant_for_value_id): Likewise.
+ (get_representative_for): Likewise.
+ (phi_translate_1): Likewise.
+ (bitmap_find_leader): Likewise.
+ (find_or_generate_expression): Likewise.
+ (do_regular_insertion): Likewise.
+ (init_pre): Likewise.
+ (fini_pre): Likewise.
+
+2012-09-04 Diego Novillo <dnovillo@google.com>
+
+ PR bootstrap/54479
+ * vec.h (vec_t::copy): Add cast in call to reserve_exact.
+
+2012-09-04 Richard Guenther <rguenther@suse.de>
+
+ * tree-ssa-pre.c (add_to_exp_gen): Adjust.
+ (make_values_for_phi): Do not add to PHI_GEN for FRE.
+ (compute_avail): Stop processing after adding all defs to
+ AVAIL_OUT for FRE.
+ (init_pre): Do not allocate not needed bitmap sets for FRE.
+
+2012-09-04 Diego Novillo <dnovillo@google.com>
+
+ Rewrite VEC_* functions as member functions of vec_t.
+
+ * vec.h: Update documentation.
+ (ALONE_VEC_CHECK_INFO): Define.
+ (ALONE_VEC_CHECK_DECL): Define.
+ (ALONE_VEC_CHECK_PASS): Define.
+ (struct vec_prefix): Rename field NUM to NUM_.
+ Rename field ALLOC to ALLOC_.
+ Update all users.
+ (struct vec_t): Rename field PREFIX to PREFIX_.
+ Rename field VEC to VEC_.
+ Update all users.
+ (vec_t::length): Rename from VEC_length_1. Update all users.
+ (vec_t::empty): Rename from VEC_empty_1. Update all users.
+ (vec_t::address): Rename from VEC_address_1. Update all users.
+ (vec_address): New.
+ (vec_t::last): Rename from VEC_last_1. Update all users.
+ (vec_t::operator[]): Rename from VEC_index_1. Update all users.
+ (vec_t::iterate): Rename from VEC_iterate_1. Update all users.
+ (vec_t::embedded_size): Rename from VEC_embedded_size_1.
+ Update all users.
+ (vec_t::embedded_init): Rename from VEC_embedded_init_1.
+ Update all users.
+ (vec_t::alloc): Rename from VEC_alloc_1. Update all users.
+ (vec_t::free): Rename from VEC_free_1. Update all users.
+ (vec_t::copy): Rename from VEC_copy_1. Update all users.
+ (vec_t::space): Rename from VEC_space_1. Update all users.
+ (vec_t::reserve): Rename from VEC_reserve_1. Update all users.
+ (vec_t::reserve_exact): Rename from VEC_reserve_exact_1.
+ Update all users.
+ (vec_t::splice): Rename from VEC_splice_1. Update all users.
+ (vec_t::safe_splice): Rename from VEC_safe_splice_1. Update all users.
+ (vec_t::quick_push): Rename from VEC_quick_push_1. Update all users.
+ (vec_t::safe_push): Rename from VEC_safe_push_1. Update all users.
+ (vec_t::pop): Rename from VEC_pop_1. Update all users.
+ (vec_t::truncate): Rename from VEC_truncate_1. Update all users.
+ (vec_t::safe_grow): Rename from VEC_safe_grow_1. Update all users.
+ (vec_t::safe_grow_cleared): Rename from VEC_safe_grow_cleared_1.
+ Update all users.
+ (vec_t::replace): Rename from VEC_replace_1. Update all users.
+ (vec_t::quick_insert): Rename from VEC_quick_insert_1.
+ Update all users.
+ (vec_t::safe_insert): Rename from VEC_safe_insert_1. Update all users.
+ (vec_t::ordered_remove): Rename from VEC_ordered_remove_1.
+ Update all users.
+ (vec_t::unordered_remove): Rename from VEC_unordered_remove_1.
+ Update all users.
+ (vec_t::block_remove): Rename from VEC_block_remove_1. Update all users.
+ (vec_t::lower_bound): Rename from VEC_lower_bound_1. Update all users.
+
+2012-09-04 Steven Bosscher <steven@gcc.gnu.org>
+
+ * gimple.h (gimple_build_switch): Remove.
+ (gimple_build_switch_vec): Promote to the new gimple_build_switch.
+ (gimple_switch_default_label): Assert the default case label is
+ really a default case label.
+ (gimple_switch_set_default_label): Likewise.
+ * gimple.c (gimple_build_switch_nlabels): Make sure a default label
+ is passed in, and simplify accordingly.
+ (gimple_build_switch): Removed.
+ (gimple_build_switch_vec): Rename to gimple_build_switch.
+ * gimplify.c (gimplify_switch_expr): Update gimple_build_switch use.
+ * gimple-pretty-print.c (dump_gimple_switch): Do not accept a NULL
+ case label.
+ * stmt.c (expand_case): Simplify using the fact that every GIMPLE
+ switch must have a default case.
+ * tree-cfg.c (group_case_labels_stmt): Likewise.
+ (verify_gimple_switch): Use gimple_switch_label in verifier to get
+ the label at index 0, and verify that it is a valid default case.
+ * except.c (sjlj_emit_dispatch_table): Rewrite construction of the
+ switch for dispatching.
+ * tree-eh.c (lower_try_finally_switch): Update gimple_build_switch use.
+ (lower_eh_dispatch): Likewise.
+ * tree-vrp.c (execute_vrp): Use gimple_switch_label to get the case
+ label at index 0 before turning it into a default case label.
+ * omp-low.c (expand_omp_sections): Update gimple_build_switch use.
+ * tree-switch-conversion.c (emit_case_bit_tests): Get the default case
+ label using gimple_switch_default_label.
+ (collect_switch_conv_info): Likewise.
+ (process_switch): Likewise.
+ * doc/gimple.texi: Update documentation of gimple_build_switch.
+
+2012-09-04 Georg-Johann Lay <avr@gjlay.de>
+
+ PR target/54476
+ * config/avr/avr.c (avr_expand_delay_cycles): Mask operand with
+ SImode.
+
+2012-09-04 Bin Cheng <bin.cheng@arm.com>
+
+ PR target/45070
+ * config/arm/arm.c (thumb1_extra_regs_pushed): Handle return value of size
+ less than 4 bytes by using macro ARM_NUM_INTS.
+ (thumb1_unexpanded_epilogue): Use macro ARM_NUM_INTS.
+
+2012-09-04 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/54458
+ * tree-ssa-threadupdate.c (thread_through_loop_header): If we
+ turn the loop into one with multiple latches mark it so.
+
+2012-09-04 Senthil Kumar Selvaraj <senthil_kumar.selvaraj@atmel.com>
+
+ PR target/54220
+ * config/avr/avr.c (TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS): New
+ define to...
+ (avr_allocate_stack_slots_for_args): ...this new static function.
+
+2012-09-04 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * config/arm/arm.c (arm_expand_builtin): Replace gen_rtx_CONST_INT
+ by GEN_INT.
+ (arm_emit_coreregs_64bit_shift): Likewise.
+
+2012-09-04 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * config/arm/arm.c (arm_evpc_neon_vext): New
+ function.
+ (arm_expand_vec_perm_const_1): Add call to
+ arm_evpc_neon_vext.
+
+2012-09-04 Oleg Endo <olegendo@gcc.gnu.org>
+
+ PR target/51244
+ * config/sh/sh.c (prepare_cbranch_operands): Pull out comparison
+ canonicalization code into...
+ * (sh_canonicalize_comparison): This new function.
+ * config/sh/sh-protos.h: Declare it.
+ * config/sh/sh.h: Use it in new macro CANONICALIZE_COMPARISON.
+ * config/sh/sh.md (cbranchsi4): Remove TARGET_CBRANCHDI4 check and
+ always invoke expand_cbranchsi4.
+
+2012-09-03 Andi Kleen <ak@linux.intel.com>
+
+ * tree-ssa-sccvn.c (vn_reference_fold_indirect): Initialize
+ addr_offset always.
+
+2012-09-03 Andrew Pinski <apinski@cavium.com>
+
+ PR tree-opt/53395
+ * tree-if-conv.c (constant_or_ssa_name): New function.
+ (fold_build_cond_expr): New function.
+ (predicate_scalar_phi): Use fold_build_cond_expr instead of build3.
+ (predicate_mem_writes): Likewise.
+
+2012-09-03 Marc Glisse <marc.glisse@inria.fr>
+
+ * fold-const.c (fold_ternary_loc): Constant-propagate after
+ removing dead operands.
+
+2012-09-03 Michael Matz <matz@suse.de>
+
+ PR tree-optimization/46590
+ * tree-cfg.c (gimple_duplicate_sese_region): Don't update
+ SSA web here ...
+ * tree-ssa-loop-ch.c (copy_loop_headers): ... but here.
+
+2012-09-03 Sandra Loosemore <sandra@codesourcery.com>
+
+ * doc/invoke.texi: Fix numerous typos and punctuation/grammatical
+ errors throughout the file. Re-word some awkward sentences and
+ paragraphs.
+
+2012-09-03 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/54362
+ * tree-ssa-structalias.c (find_func_aliases): Handle COND_EXPR.
+
+2012-09-03 Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
+
+ * config/s390/s390.c (s390_loadrelative_operand_p): New function.
+ (s390_check_qrst_address, print_operand_address): Use
+ s390_loadrelative_operand_p instead of s390_symref_operand_p.
+ (s390_check_symref_alignment): Accept pointer size alignment for
+ GOT slots.
+ (legitimize_pic_address): Use load relative on z10 or later.
+
+2012-09-03 Jakub Jelinek <jakub@redhat.com>
+
+ PR debug/53923
+ * valtrack.c (dead_debug_insert_temp): Drop non-reg uses
+ from the chain.
+
+2012-09-03 Joseph Myers <joseph@codesourcery.com>
+
+ * common.opt (--no-sysroot-suffix): New driver option.
+ * doc/invoke.texi (--no-sysroot-suffix): Document.
+ * gcc.c (driver_handle_option): Handle --no-sysroot-suffix as not
+ needing spec processing.
+ (main): Do not process sysroot suffixes if no_sysroot_suffix.
+
2012-09-02 Oleg Endo <olegendo@gcc.gnu.org>
PR target/33135
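
The first ChangeLog entry above (gcov-io.c, gcov-io.h, profile.c) adds a histogram of counter values to the gcov program summary so that working sets can be computed from it. As a rough illustration of the underlying idea only, the sketch below shows a log-scaled bucket index in C; histo_index and N_BUCKETS are hypothetical names, and the real gcov_histo_index uses the bucket layout defined by GCOV_HISTOGRAM_SIZE in gcov-io.h rather than this simple scheme.

#include <stdint.h>

#define N_BUCKETS 64  /* hypothetical: one bucket per power of two */

/* Map a profile counter to a logarithmically scaled bucket so that small
   counts are kept apart while very large counts share coarse buckets.  */
static unsigned
histo_index (uint64_t value)
{
  unsigned idx = 0;
  while (value > 1 && idx < N_BUCKETS - 1)
    {
      value >>= 1;
      idx++;
    }
  return idx;
}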
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 384fc2b6acb..abe4c3a7b99 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20120903
+20120905
diff --git a/gcc/alias.c b/gcc/alias.c
index c1d5c8a8f36..b7182074c5d 100644
--- a/gcc/alias.c
+++ b/gcc/alias.c
@@ -2177,12 +2177,18 @@ memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
storeqi_unaligned pattern. */
/* Read dependence: X is read after read in MEM takes place. There can
- only be a dependence here if both reads are volatile. */
+ only be a dependence here if both reads are volatile, or if either is
+ an explicit barrier. */
int
read_dependence (const_rtx mem, const_rtx x)
{
- return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
+ if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
+ return true;
+ if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
+ || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
+ return true;
+ return false;
}
/* Return true if we can determine that the fields referenced cannot
diff --git a/gcc/basic-block.h b/gcc/basic-block.h
index 09b5eb0a77c..288127f5660 100644
--- a/gcc/basic-block.h
+++ b/gcc/basic-block.h
@@ -31,6 +31,7 @@ along with GCC; see the file COPYING3. If not see
flow graph is manipulated by various optimizations. A signed type
makes those easy to detect. */
typedef HOST_WIDEST_INT gcov_type;
+typedef unsigned HOST_WIDEST_INT gcov_type_unsigned;
/* Control flow edge information. */
struct GTY((user)) edge_def {
@@ -91,6 +92,16 @@ enum cfg_edge_flags {
profile.c. */
extern const struct gcov_ctr_summary *profile_info;
+/* Working set size statistics for a given percentage of the entire
+ profile (sum_all from the counter summary). */
+typedef struct gcov_working_set_info
+{
+ /* Number of hot counters included in this working set. */
+ unsigned num_counters;
+ /* Smallest counter included in this working set. */
+ gcov_type min_counter;
+} gcov_working_set_t;
+
/* Declared in cfgloop.h. */
struct loop;
@@ -897,4 +908,7 @@ extern void rtl_profile_for_bb (basic_block);
extern void rtl_profile_for_edge (edge);
extern void default_rtl_profile (void);
+/* In profile.c. */
+extern gcov_working_set_t *find_working_set(unsigned pct_times_10);
+
#endif /* GCC_BASIC_BLOCK_H */
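
The hunk above adds gcov_working_set_t and declares find_working_set; its pct_times_10 parameter name suggests the cutoff is given in tenths of a percent of the summed profile counts. A minimal sketch of a possible caller follows; counter_is_hot is a hypothetical helper, and the assumption that find_working_set returns NULL when no profile summary is available is not stated in the patch.

/* Hypothetical consumer: treat a counter as hot when it falls inside the
   working set covering 99.9% of the profiled execution counts.  */
static bool
counter_is_hot (gcov_type count)
{
  gcov_working_set_t *ws = find_working_set (999);  /* 99.9%, in tenths of a percent */
  if (ws == NULL)  /* assumed behaviour when no summary has been read */
    return false;
  return count >= ws->min_counter;
}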
diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c
index c282fb2fe30..3ebfa139d30 100644
--- a/gcc/bb-reorder.c
+++ b/gcc/bb-reorder.c
@@ -2037,6 +2037,65 @@ insert_section_boundary_note (void)
}
}
+static bool
+gate_handle_reorder_blocks (void)
+{
+ if (targetm.cannot_modify_jumps_p ())
+ return false;
+ /* Don't reorder blocks when optimizing for size because extra jump insns may
+ be created; also barrier may create extra padding.
+
+ More correctly we should have a block reordering mode that tried to
+ minimize the combined size of all the jumps. This would more or less
+ automatically remove extra jumps, but would also try to use more short
+ jumps instead of long jumps. */
+ if (!optimize_function_for_speed_p (cfun))
+ return false;
+ return (optimize > 0
+ && (flag_reorder_blocks || flag_reorder_blocks_and_partition));
+}
+
+static unsigned int
+rest_of_handle_reorder_blocks (void)
+{
+ basic_block bb;
+
+ /* Last attempt to optimize CFG, as scheduling, peepholing and insn
+ splitting possibly introduced more crossjumping opportunities. */
+ cfg_layout_initialize (CLEANUP_EXPENSIVE);
+
+ reorder_basic_blocks ();
+ cleanup_cfg (CLEANUP_EXPENSIVE);
+
+ FOR_EACH_BB (bb)
+ if (bb->next_bb != EXIT_BLOCK_PTR)
+ bb->aux = bb->next_bb;
+ cfg_layout_finalize ();
+
+ /* Add NOTE_INSN_SWITCH_TEXT_SECTIONS notes. */
+ insert_section_boundary_note ();
+ return 0;
+}
+
+struct rtl_opt_pass pass_reorder_blocks =
+{
+ {
+ RTL_PASS,
+ "bbro", /* name */
+ gate_handle_reorder_blocks, /* gate */
+ rest_of_handle_reorder_blocks, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_REORDER_BLOCKS, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_verify_rtl_sharing, /* todo_flags_finish */
+ }
+};
+
/* Duplicate the blocks containing computed gotos. This basically unfactors
computed gotos that were factored early on in the compilation process to
speed up edge based data flow. We used to not unfactoring them again,
@@ -2178,6 +2237,21 @@ struct rtl_opt_pass pass_duplicate_computed_gotos =
}
};
+static bool
+gate_handle_partition_blocks (void)
+{
+ /* The optimization to partition hot/cold basic blocks into separate
+ sections of the .o file does not work well with linkonce or with
+ user defined section attributes. Don't call it if either case
+ arises. */
+ return (flag_reorder_blocks_and_partition
+ && optimize
+ /* See gate_handle_reorder_blocks. We should not partition if
+ we are going to omit the reordering. */
+ && optimize_function_for_speed_p (cfun)
+ && !DECL_ONE_ONLY (current_function_decl)
+ && !user_defined_section_attribute);
+}
/* This function is the main 'entrance' for the optimization that
partitions hot and cold basic blocks into separate sections of the
@@ -2346,83 +2420,6 @@ partition_hot_cold_basic_blocks (void)
return TODO_verify_flow | TODO_verify_rtl_sharing;
}
-
-static bool
-gate_handle_reorder_blocks (void)
-{
- if (targetm.cannot_modify_jumps_p ())
- return false;
- /* Don't reorder blocks when optimizing for size because extra jump insns may
- be created; also barrier may create extra padding.
-
- More correctly we should have a block reordering mode that tried to
- minimize the combined size of all the jumps. This would more or less
- automatically remove extra jumps, but would also try to use more short
- jumps instead of long jumps. */
- if (!optimize_function_for_speed_p (cfun))
- return false;
- return (optimize > 0
- && (flag_reorder_blocks || flag_reorder_blocks_and_partition));
-}
-
-
-/* Reorder basic blocks. */
-static unsigned int
-rest_of_handle_reorder_blocks (void)
-{
- basic_block bb;
-
- /* Last attempt to optimize CFG, as scheduling, peepholing and insn
- splitting possibly introduced more crossjumping opportunities. */
- cfg_layout_initialize (CLEANUP_EXPENSIVE);
-
- reorder_basic_blocks ();
- cleanup_cfg (CLEANUP_EXPENSIVE);
-
- FOR_EACH_BB (bb)
- if (bb->next_bb != EXIT_BLOCK_PTR)
- bb->aux = bb->next_bb;
- cfg_layout_finalize ();
-
- /* Add NOTE_INSN_SWITCH_TEXT_SECTIONS notes. */
- insert_section_boundary_note ();
- return 0;
-}
-
-struct rtl_opt_pass pass_reorder_blocks =
-{
- {
- RTL_PASS,
- "bbro", /* name */
- gate_handle_reorder_blocks, /* gate */
- rest_of_handle_reorder_blocks, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_REORDER_BLOCKS, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_verify_rtl_sharing, /* todo_flags_finish */
- }
-};
-
-static bool
-gate_handle_partition_blocks (void)
-{
- /* The optimization to partition hot/cold basic blocks into separate
- sections of the .o file does not work well with linkonce or with
- user defined section attributes. Don't call it if either case
- arises. */
- return (flag_reorder_blocks_and_partition
- && optimize
- /* See gate_handle_reorder_blocks. We should not partition if
- we are going to omit the reordering. */
- && optimize_function_for_speed_p (cfun)
- && !DECL_ONE_ONLY (current_function_decl)
- && !user_defined_section_attribute);
-}
struct rtl_opt_pass pass_partition_blocks =
{
diff --git a/gcc/common.opt b/gcc/common.opt
index 894236dd80f..50e5f33adb2 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -291,6 +291,9 @@ Driver Alias(no-canonical-prefixes)
-no-standard-libraries
Driver Alias(nostdlib)
+-no-sysroot-suffix
+Driver Var(no_sysroot_suffix)
+
-no-warnings
Common Alias(w)
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 9ce3c0f068a..4001f350f2f 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -20959,7 +20959,7 @@ arm_expand_builtin (tree exp,
else if (icode == CODE_FOR_iwmmxt_tinsrw && (selector < 0 ||selector > 1))
error ("the range of selector should be in 0 to 1");
mask <<= selector;
- op2 = gen_rtx_CONST_INT (SImode, mask);
+ op2 = GEN_INT (mask);
}
if (target == 0
|| GET_MODE (target) != tmode
@@ -21862,7 +21862,7 @@ thumb1_extra_regs_pushed (arm_stack_offsets *offsets, bool for_prologue)
unsigned long l_mask = live_regs_mask & (for_prologue ? 0x40ff : 0xff);
/* Then count how many other high registers will need to be pushed. */
unsigned long high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
- int n_free, reg_base;
+ int n_free, reg_base, size;
if (!for_prologue && frame_pointer_needed)
amount = offsets->locals_base - offsets->saved_regs;
@@ -21901,7 +21901,8 @@ thumb1_extra_regs_pushed (arm_stack_offsets *offsets, bool for_prologue)
n_free = 0;
if (!for_prologue)
{
- reg_base = arm_size_return_regs () / UNITS_PER_WORD;
+ size = arm_size_return_regs ();
+ reg_base = ARM_NUM_INTS (size);
live_regs_mask >>= reg_base;
}
@@ -21955,8 +21956,7 @@ thumb1_unexpanded_epilogue (void)
if (extra_pop > 0)
{
unsigned long extra_mask = (1 << extra_pop) - 1;
- live_regs_mask |= extra_mask << ((size + UNITS_PER_WORD - 1)
- / UNITS_PER_WORD);
+ live_regs_mask |= extra_mask << ARM_NUM_INTS (size);
}
/* The prolog may have pushed some high registers to use as
@@ -25937,6 +25937,72 @@ arm_evpc_neon_vtrn (struct expand_vec_perm_d *d)
return true;
}
+/* Recognize patterns for the VEXT insns. */
+
+static bool
+arm_evpc_neon_vext (struct expand_vec_perm_d *d)
+{
+ unsigned int i, nelt = d->nelt;
+ rtx (*gen) (rtx, rtx, rtx, rtx);
+ rtx offset;
+
+ unsigned int location;
+
+ unsigned int next = d->perm[0] + 1;
+
+ /* TODO: Handle GCC's numbering of elements for big-endian. */
+ if (BYTES_BIG_ENDIAN)
+ return false;
+
+ /* Check if the extracted indexes are increasing by one. */
+ for (i = 1; i < nelt; next++, i++)
+ {
+ /* If we hit the most significant element of the 2nd vector in
+ the previous iteration, no need to test further. */
+ if (next == 2 * nelt)
+ return false;
+
+ /* If we are operating on only one vector: it could be a
+ rotation. If there are only two elements of size < 64, let
+ arm_evpc_neon_vrev catch it. */
+ if (d->one_vector_p && (next == nelt))
+ {
+ if ((nelt == 2) && (d->vmode != V2DImode))
+ return false;
+ else
+ next = 0;
+ }
+
+ if (d->perm[i] != next)
+ return false;
+ }
+
+ location = d->perm[0];
+
+ switch (d->vmode)
+ {
+ case V16QImode: gen = gen_neon_vextv16qi; break;
+ case V8QImode: gen = gen_neon_vextv8qi; break;
+ case V4HImode: gen = gen_neon_vextv4hi; break;
+ case V8HImode: gen = gen_neon_vextv8hi; break;
+ case V2SImode: gen = gen_neon_vextv2si; break;
+ case V4SImode: gen = gen_neon_vextv4si; break;
+ case V2SFmode: gen = gen_neon_vextv2sf; break;
+ case V4SFmode: gen = gen_neon_vextv4sf; break;
+ case V2DImode: gen = gen_neon_vextv2di; break;
+ default:
+ return false;
+ }
+
+ /* Success! */
+ if (d->testing_p)
+ return true;
+
+ offset = GEN_INT (location);
+ emit_insn (gen (d->target, d->op0, d->op1, offset));
+ return true;
+}
+
/* The NEON VTBL instruction is a fully variable permuation that's even
stronger than what we expose via VEC_PERM_EXPR. What it doesn't do
is mask the index operand as VEC_PERM_EXPR requires. Therefore we
@@ -25976,6 +26042,12 @@ arm_evpc_neon_vtbl (struct expand_vec_perm_d *d)
static bool
arm_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
{
+ /* Check if the input mask matches vext before reordering the
+ operands. */
+ if (TARGET_NEON)
+ if (arm_evpc_neon_vext (d))
+ return true;
+
/* The pattern matching functions above are written to look for a small
number to begin the sequence (0, 1, N/2). If we begin with an index
from the second operand, we can swap the operands. */
@@ -26218,12 +26290,12 @@ arm_emit_coreregs_64bit_shift (enum rtx_code code, rtx out, rtx in,
/* Macros to make following code more readable. */
#define SUB_32(DEST,SRC) \
- gen_addsi3 ((DEST), (SRC), gen_rtx_CONST_INT (VOIDmode, -32))
+ gen_addsi3 ((DEST), (SRC), GEN_INT (-32))
#define RSB_32(DEST,SRC) \
- gen_subsi3 ((DEST), gen_rtx_CONST_INT (VOIDmode, 32), (SRC))
+ gen_subsi3 ((DEST), GEN_INT (32), (SRC))
#define SUB_S_32(DEST,SRC) \
gen_addsi3_compare0 ((DEST), (SRC), \
- gen_rtx_CONST_INT (VOIDmode, -32))
+ GEN_INT (-32))
#define SET(DEST,SRC) \
gen_rtx_SET (SImode, (DEST), (SRC))
#define SHIFT(CODE,SRC,AMOUNT) \
@@ -26259,7 +26331,7 @@ arm_emit_coreregs_64bit_shift (enum rtx_code code, rtx out, rtx in,
{
if (code == ASHIFTRT)
{
- rtx const31_rtx = gen_rtx_CONST_INT (VOIDmode, 31);
+ rtx const31_rtx = GEN_INT (31);
emit_insn (SET (out_down, SHIFT (code, in_up, const31_rtx)));
emit_insn (SET (out_up, SHIFT (code, in_up, const31_rtx)));
}
@@ -26271,8 +26343,7 @@ arm_emit_coreregs_64bit_shift (enum rtx_code code, rtx out, rtx in,
else if (INTVAL (amount) < 32)
{
/* Shifts by a constant less than 32. */
- rtx reverse_amount = gen_rtx_CONST_INT (VOIDmode,
- 32 - INTVAL (amount));
+ rtx reverse_amount = GEN_INT (32 - INTVAL (amount));
emit_insn (SET (out_down, LSHIFT (code, in_down, amount)));
emit_insn (SET (out_down,
@@ -26283,12 +26354,12 @@ arm_emit_coreregs_64bit_shift (enum rtx_code code, rtx out, rtx in,
else
{
/* Shifts by a constant greater than 31. */
- rtx adj_amount = gen_rtx_CONST_INT (VOIDmode, INTVAL (amount) - 32);
+ rtx adj_amount = GEN_INT (INTVAL (amount) - 32);
emit_insn (SET (out_down, SHIFT (code, in_up, adj_amount)));
if (code == ASHIFTRT)
emit_insn (gen_ashrsi3 (out_up, in_up,
- gen_rtx_CONST_INT (VOIDmode, 31)));
+ GEN_INT (31)));
else
emit_insn (SET (out_up, const0_rtx));
}
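
The arm_evpc_neon_vext hunk above accepts a permutation selector only when the chosen indices form one contiguous run starting at perm[0], optionally wrapping within a single input vector (a rotation). A minimal standalone sketch of that index check follows; the names are illustrative and the nelt == 2 special case is omitted for brevity, so this is a model of the idea, not the GCC code itself.

// Standalone model of the index check in arm_evpc_neon_vext.
#include <cstdio>

static bool vext_mask_p (const unsigned *perm, unsigned nelt,
                         bool one_vector, unsigned *location)
{
  unsigned next = perm[0] + 1;
  for (unsigned i = 1; i < nelt; next++, i++)
    {
      if (next == 2 * nelt)          /* Ran off the end of the second vector.  */
        return false;
      if (one_vector && next == nelt)
        next = 0;                    /* Wrap: a rotation within one vector.  */
      if (perm[i] != next)
        return false;                /* Not a contiguous run of elements.  */
    }
  *location = perm[0];
  return true;
}

int main ()
{
  const unsigned perm[4] = { 2, 3, 4, 5 };   /* V4SI selector: vext, offset 2.  */
  unsigned loc;
  if (vext_mask_p (perm, 4, false, &loc))
    printf ("vext, offset %u\n", loc);
  return 0;
}
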
diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
index c17533000c7..95892ade448 100644
--- a/gcc/config/avr/avr.c
+++ b/gcc/config/avr/avr.c
@@ -700,6 +700,16 @@ avr_regs_to_save (HARD_REG_SET *set)
return count;
}
+
+/* Implement `TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS' */
+
+static bool
+avr_allocate_stack_slots_for_args (void)
+{
+ return !cfun->machine->is_naked;
+}
+
+
/* Return true if register FROM can be eliminated via register TO. */
static bool
@@ -10439,7 +10449,7 @@ avr_mem_clobber (void)
static void
avr_expand_delay_cycles (rtx operands0)
{
- unsigned HOST_WIDE_INT cycles = UINTVAL (operands0);
+ unsigned HOST_WIDE_INT cycles = UINTVAL (operands0) & GET_MODE_MASK (SImode);
unsigned HOST_WIDE_INT cycles_used;
unsigned HOST_WIDE_INT loop_count;
@@ -11339,6 +11349,9 @@ avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg,
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE avr_can_eliminate
+#undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
+#define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS avr_allocate_stack_slots_for_args
+
#undef TARGET_WARN_FUNC_RETURN
#define TARGET_WARN_FUNC_RETURN avr_warn_func_return
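
Two small avr changes above: naked functions no longer get stack slots allocated for their arguments, and the delay-cycle expander now truncates its operand to SImode before computing the delay loops. The truncation matters because UINTVAL yields a host-wide (typically 64-bit) value. A standalone illustration of the effect, using a hypothetical negative argument; the constants mirror GET_MODE_MASK (SImode) but nothing here is GCC API.

// Why masking to SImode matters for avr_expand_delay_cycles.
#include <cstdint>
#include <cstdio>

int main ()
{
  int64_t operand = -1;                      /* hypothetical builtin argument */
  uint64_t cycles = (uint64_t) operand;      /* UINTVAL: 0xffffffffffffffff   */
  uint64_t masked = cycles & 0xffffffffu;    /* & GET_MODE_MASK (SImode)      */
  printf ("%llx -> %llx\n",
          (unsigned long long) cycles, (unsigned long long) masked);
  return 0;
}
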
diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c
index d67c0eb5c57..976d4cbc8a7 100644
--- a/gcc/config/s390/s390.c
+++ b/gcc/config/s390/s390.c
@@ -2123,6 +2123,22 @@ s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
return true;
}
+/* Return TRUE if ADDR is an operand valid for a load/store relative
+ instruction. Be aware that the alignment of the operand needs to
+ be checked separately. */
+static bool
+s390_loadrelative_operand_p (rtx addr)
+{
+ if (GET_CODE (addr) == CONST)
+ addr = XEXP (addr, 0);
+
+ /* Enable load relative for symbol@GOTENT. */
+ if (GET_CODE (addr) == UNSPEC
+ && XINT (addr, 1) == UNSPEC_GOTENT)
+ return true;
+
+ return s390_symref_operand_p (addr, NULL, NULL);
+}
/* Return true if the address in OP is valid for constraint letter C
if wrapped in a MEM rtx. Set LIT_POOL_OK to true if it literal
@@ -2137,7 +2153,7 @@ s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
/* This check makes sure that no symbolic address (except literal
pool references) are accepted by the R or T constraints. */
- if (s390_symref_operand_p (op, NULL, NULL))
+ if (s390_loadrelative_operand_p (op))
return 0;
/* Ensure literal pool references are only accepted if LIT_POOL_OK. */
@@ -2941,6 +2957,13 @@ s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
HOST_WIDE_INT addend;
rtx symref;
+ /* Accept symbol@GOTENT with pointer size alignment. */
+ if (GET_CODE (addr) == CONST
+ && GET_CODE (XEXP (addr, 0)) == UNSPEC
+ && XINT (XEXP (addr, 0), 1) == UNSPEC_GOTENT
+ && alignment <= UNITS_PER_LONG)
+ return true;
+
if (!s390_symref_operand_p (addr, &symref, &addend))
return false;
@@ -3398,9 +3421,14 @@ legitimize_pic_address (rtx orig, rtx reg)
new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
new_rtx = gen_rtx_CONST (Pmode, new_rtx);
- emit_move_insn (temp, new_rtx);
- new_rtx = gen_const_mem (Pmode, temp);
+ if (!TARGET_Z10)
+ {
+ emit_move_insn (temp, new_rtx);
+ new_rtx = gen_const_mem (Pmode, temp);
+ }
+ else
+ new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
emit_move_insn (reg, new_rtx);
new_rtx = reg;
}
@@ -5250,7 +5278,7 @@ print_operand_address (FILE *file, rtx addr)
{
struct s390_address ad;
- if (s390_symref_operand_p (addr, NULL, NULL))
+ if (s390_loadrelative_operand_p (addr))
{
if (!TARGET_Z10)
{
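
The s390 hunks above introduce s390_loadrelative_operand_p, which treats a (const (unspec [...] UNSPEC_GOTENT)) address like a plain symbol reference so that, on z10 and later, the GOT slot can be reached with a PC-relative load instead of first materializing its address in a register. A toy model of the predicate over a simplified expression type; the Expr structure and Code enum are stand-ins, not GCC RTL.

// Toy model of the new load-relative operand check.
#include <cstdio>

enum Code { SYMBOL_REF, CONST, UNSPEC_OTHER, UNSPEC_GOTENT };

struct Expr { Code code; const Expr *inner; };

static bool symref_p (const Expr *e) { return e->code == SYMBOL_REF; }

static bool loadrelative_operand_p (const Expr *e)
{
  if (e->code == CONST)
    e = e->inner;                    /* Strip the CONST wrapper.  */
  if (e->code == UNSPEC_GOTENT)      /* symbol@GOTENT: allow load relative.  */
    return true;
  return symref_p (e);               /* Otherwise fall back to the old check.  */
}

int main ()
{
  Expr sym = { SYMBOL_REF, nullptr };
  Expr got = { UNSPEC_GOTENT, nullptr };
  Expr cst = { CONST, &got };
  printf ("%d %d\n", loadrelative_operand_p (&sym), loadrelative_operand_p (&cst));
  return 0;
}
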
diff --git a/gcc/config/sh/sh-protos.h b/gcc/config/sh/sh-protos.h
index 8cc5cc6b488..2bb318b1bc3 100644
--- a/gcc/config/sh/sh-protos.h
+++ b/gcc/config/sh/sh-protos.h
@@ -106,6 +106,9 @@ extern bool sh_expand_t_scc (rtx *);
extern rtx sh_gen_truncate (enum machine_mode, rtx, int);
extern bool sh_vector_mode_supported_p (enum machine_mode);
extern bool sh_cfun_trap_exit_p (void);
+extern void sh_canonicalize_comparison (enum rtx_code&, rtx&, rtx&,
+ enum machine_mode mode = VOIDmode);
+
#endif /* RTX_CODE */
extern const char *output_jump_label_table (void);
diff --git a/gcc/config/sh/sh.c b/gcc/config/sh/sh.c
index 3851ec6d50f..5055d1ff929 100644
--- a/gcc/config/sh/sh.c
+++ b/gcc/config/sh/sh.c
@@ -21,6 +21,12 @@ You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
+/* FIXME: This is a temporary hack, so that we can include <algorithm>
+ below. <algorithm> will try to include <cstdlib> which will reference
+ malloc & co, which are poisoned by "system.h". The proper solution is
+ to include <cstdlib> in "system.h" instead of <stdlib.h>. */
+#include <cstdlib>
+
#include "config.h"
#include "system.h"
#include "coretypes.h"
@@ -56,6 +62,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm-constrs.h"
#include "opts.h"
+#include <algorithm>
int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;
@@ -1791,65 +1798,124 @@ prepare_move_operands (rtx operands[], enum machine_mode mode)
}
}
-enum rtx_code
-prepare_cbranch_operands (rtx *operands, enum machine_mode mode,
- enum rtx_code comparison)
+/* Implement the CANONICALIZE_COMPARISON macro for the combine pass.
+ This function is also re-used to canonicalize comparisons in cbranch
+ pattern expanders. */
+void
+sh_canonicalize_comparison (enum rtx_code& cmp, rtx& op0, rtx& op1,
+ enum machine_mode mode)
{
- rtx op1;
- rtx scratch = NULL_RTX;
+ /* When invoked from within the combine pass the mode is not specified,
+ so try to get it from one of the operands. */
+ if (mode == VOIDmode)
+ mode = GET_MODE (op0);
+ if (mode == VOIDmode)
+ mode = GET_MODE (op1);
- if (comparison == LAST_AND_UNUSED_RTX_CODE)
- comparison = GET_CODE (operands[0]);
- else
- scratch = operands[4];
- if (CONST_INT_P (operands[1])
- && !CONST_INT_P (operands[2]))
- {
- rtx tmp = operands[1];
+ // We need to have a mode to do something useful here.
+ if (mode == VOIDmode)
+ return;
+
+ // Currently, we don't deal with floats here.
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return;
- operands[1] = operands[2];
- operands[2] = tmp;
- comparison = swap_condition (comparison);
+ // Make sure that the constant operand is the second operand.
+ if (CONST_INT_P (op0) && !CONST_INT_P (op1))
+ {
+ std::swap (op0, op1);
+ cmp = swap_condition (cmp);
}
- if (CONST_INT_P (operands[2]))
+
+ if (CONST_INT_P (op1))
{
- HOST_WIDE_INT val = INTVAL (operands[2]);
- if ((val == -1 || val == -0x81)
- && (comparison == GT || comparison == LE))
+ /* Try to adjust the constant operand in such a way that available
+ comparison insns can be utilized better and the constant can be
+ loaded with a 'mov #imm,Rm' insn. This avoids a load from the
+ constant pool. */
+ const HOST_WIDE_INT val = INTVAL (op1);
+
+ /* x > -1 --> x >= 0
+ x > 0xFFFFFF7F --> x >= 0xFFFFFF80
+ x <= -1 --> x < 0
+ x <= 0xFFFFFF7F --> x < 0xFFFFFF80 */
+ if ((val == -1 || val == -0x81) && (cmp == GT || cmp == LE))
+ {
+ cmp = cmp == GT ? GE : LT;
+ op1 = gen_int_mode (val + 1, mode);
+ }
+
+ /* x >= 1 --> x > 0
+ x >= 0x80 --> x > 0x7F
+ x < 1 --> x <= 0
+ x < 0x80 --> x <= 0x7F */
+ else if ((val == 1 || val == 0x80) && (cmp == GE || cmp == LT))
{
- comparison = (comparison == GT) ? GE : LT;
- operands[2] = gen_int_mode (val + 1, mode);
+ cmp = cmp == GE ? GT : LE;
+ op1 = gen_int_mode (val - 1, mode);
}
- else if ((val == 1 || val == 0x80)
- && (comparison == GE || comparison == LT))
+
+ /* unsigned x >= 1 --> x != 0
+ unsigned x < 1 --> x == 0 */
+ else if (val == 1 && (cmp == GEU || cmp == LTU))
{
- comparison = (comparison == GE) ? GT : LE;
- operands[2] = gen_int_mode (val - 1, mode);
+ cmp = cmp == GEU ? NE : EQ;
+ op1 = CONST0_RTX (mode);
}
- else if (val == 1 && (comparison == GEU || comparison == LTU))
+
+ /* unsigned x >= 0x80 --> unsigned x > 0x7F
+ unsigned x < 0x80 --> unsigned x <= 0x7F */
+ else if (val == 0x80 && (cmp == GEU || cmp == LTU))
{
- comparison = (comparison == GEU) ? NE : EQ;
- operands[2] = CONST0_RTX (mode);
+ cmp = cmp == GEU ? GTU : LEU;
+ op1 = gen_int_mode (val - 1, mode);
}
- else if (val == 0x80 && (comparison == GEU || comparison == LTU))
+
+ /* unsigned x > 0 --> x != 0
+ unsigned x <= 0 --> x == 0 */
+ else if (val == 0 && (cmp == GTU || cmp == LEU))
+ cmp = cmp == GTU ? NE : EQ;
+
+ /* unsigned x > 0x7FFFFFFF --> signed x < 0
+ unsigned x <= 0x7FFFFFFF --> signed x >= 0 */
+ else if (mode == SImode && (cmp == GTU || cmp == LEU)
+ && val == 0x7FFFFFFF)
{
- comparison = (comparison == GEU) ? GTU : LEU;
- operands[2] = gen_int_mode (val - 1, mode);
+ cmp = cmp == GTU ? LT : GE;
+ op1 = const0_rtx;
}
- else if (val == 0 && (comparison == GTU || comparison == LEU))
- comparison = (comparison == GTU) ? NE : EQ;
- else if (mode == SImode
- && ((val == 0x7fffffff
- && (comparison == GTU || comparison == LEU))
- || ((unsigned HOST_WIDE_INT) val
- == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
- && (comparison == GEU || comparison == LTU))))
+
+ /* unsigned x >= 0x80000000 --> signed x < 0
+ unsigned x < 0x80000000 --> signed x >= 0 */
+ else if (mode == SImode && (cmp == GEU || cmp == LTU)
+ && (unsigned HOST_WIDE_INT)val
+ == ((unsigned HOST_WIDE_INT)0x7FFFFFFF + 1))
{
- comparison = (comparison == GTU || comparison == GEU) ? LT : GE;
- operands[2] = CONST0_RTX (mode);
+ cmp = cmp == GEU ? LT : GE;
+ op1 = const0_rtx;
}
}
- op1 = operands[1];
+}
+
+enum rtx_code
+prepare_cbranch_operands (rtx *operands, enum machine_mode mode,
+ enum rtx_code comparison)
+{
+ /* The scratch reg is only available when this is invoked from within
+ the cbranchdi4_i splitter, through expand_cbranchdi4. */
+ rtx scratch = NULL_RTX;
+
+ if (comparison == LAST_AND_UNUSED_RTX_CODE)
+ comparison = GET_CODE (operands[0]);
+ else
+ scratch = operands[4];
+
+ sh_canonicalize_comparison (comparison, operands[1], operands[2], mode);
+
+ /* Notice that this function is also invoked after reload by
+ the cbranchdi4_i pattern, through expand_cbranchdi4. */
+ rtx op1 = operands[1];
+
if (can_create_pseudo_p ())
operands[1] = force_reg (mode, op1);
/* When we are handling DImode comparisons, we want to keep constants so
@@ -1883,8 +1949,6 @@ void
expand_cbranchsi4 (rtx *operands, enum rtx_code comparison, int probability)
{
rtx (*branch_expander) (rtx, rtx) = gen_branch_true;
- rtx jump;
-
comparison = prepare_cbranch_operands (operands, SImode, comparison);
switch (comparison)
{
@@ -1896,10 +1960,9 @@ expand_cbranchsi4 (rtx *operands, enum rtx_code comparison, int probability)
emit_insn (gen_rtx_SET (VOIDmode, get_t_reg_rtx (),
gen_rtx_fmt_ee (comparison, SImode,
operands[1], operands[2])));
- jump = emit_jump_insn (branch_expander (operands[3], get_t_reg_rtx ()));
+ rtx jump = emit_jump_insn (branch_expander (operands[3], get_t_reg_rtx ()));
if (probability >= 0)
add_reg_note (jump, REG_BR_PROB, GEN_INT (probability));
-
}
/* ??? How should we distribute probabilities when more than one branch
@@ -1956,8 +2019,7 @@ expand_cbranchdi4 (rtx *operands, enum rtx_code comparison)
lsw_taken = EQ;
if (prob >= 0)
{
- /* If we had more precision, we'd use rev_prob - (rev_prob >> 32) .
- */
+ // If we had more precision, we'd use rev_prob - (rev_prob >> 32) .
msw_skip_prob = rev_prob;
if (REG_BR_PROB_BASE <= 65535)
lsw_taken_prob = prob ? REG_BR_PROB_BASE : 0;
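
sh_canonicalize_comparison above rewrites comparisons against selected constants so that the constant can be loaded with a 'mov #imm,Rm' insn and an available comparison insn can be used. A standalone model of a few of those rewrites; the Cmp enum stands in for rtx_code, only the constant operand is tracked, and several of the unsigned rules are omitted.

// Standalone model of a subset of the constant adjustments.
#include <cstdint>
#include <cstdio>

enum Cmp { GT, GE, LT, LE, GTU, GEU, LTU, LEU, EQ, NE };

static void canonicalize (Cmp &cmp, int64_t &val)   /* val models CONST_INT op1 */
{
  if ((val == -1 || val == -0x81) && (cmp == GT || cmp == LE))
    { cmp = (cmp == GT) ? GE : LT; val += 1; }       /* x > -1  -->  x >= 0          */
  else if ((val == 1 || val == 0x80) && (cmp == GE || cmp == LT))
    { cmp = (cmp == GE) ? GT : LE; val -= 1; }       /* x >= 1  -->  x > 0           */
  else if (val == 1 && (cmp == GEU || cmp == LTU))
    { cmp = (cmp == GEU) ? NE : EQ; val = 0; }       /* unsigned x >= 1  -->  x != 0 */
  else if (val == 0x7FFFFFFF && (cmp == GTU || cmp == LEU))
    { cmp = (cmp == GTU) ? LT : GE; val = 0; }       /* unsigned x > 0x7FFFFFFF  -->  signed x < 0 */
}

int main ()
{
  Cmp c = GE;
  int64_t v = 1;
  canonicalize (c, v);                               /* x >= 1 becomes x > 0 */
  printf ("GE 1 -> %s %lld\n", c == GT ? "GT" : "?", (long long) v);
  return 0;
}
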
diff --git a/gcc/config/sh/sh.h b/gcc/config/sh/sh.h
index af7fe0bf545..b36287276aa 100644
--- a/gcc/config/sh/sh.h
+++ b/gcc/config/sh/sh.h
@@ -1946,6 +1946,10 @@ struct sh_args {
leave this zero for correct SH3 code. */
#define SHIFT_COUNT_TRUNCATED (! TARGET_SH3 && ! TARGET_SH2A)
+/* CANONICALIZE_COMPARISON macro for the combine pass. */
+#define CANONICALIZE_COMPARISON(CODE, OP0, OP1) \
+ sh_canonicalize_comparison ((CODE), (OP0), (OP1))
+
/* All integers have the same format so truncation is easy. */
/* But SHmedia must sign-extend DImode when truncating to SImode. */
#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) \
diff --git a/gcc/config/sh/sh.md b/gcc/config/sh/sh.md
index d69d2947ebb..8b44fbda496 100644
--- a/gcc/config/sh/sh.md
+++ b/gcc/config/sh/sh.md
@@ -881,10 +881,9 @@
if (TARGET_SHMEDIA)
emit_jump_insn (gen_cbranchint4_media (operands[0], operands[1],
operands[2], operands[3]));
- else if (TARGET_CBRANCHDI4)
- expand_cbranchsi4 (operands, LAST_AND_UNUSED_RTX_CODE, -1);
else
- sh_emit_compare_and_branch (operands, SImode);
+ expand_cbranchsi4 (operands, LAST_AND_UNUSED_RTX_CODE, -1);
+
DONE;
})
diff --git a/gcc/coverage.c b/gcc/coverage.c
index b4d22dfd9c6..f9b12e8b6f6 100644
--- a/gcc/coverage.c
+++ b/gcc/coverage.c
@@ -248,6 +248,13 @@ read_counts_file (void)
summary.ctrs[ix].run_max = sum.ctrs[ix].run_max;
summary.ctrs[ix].sum_max += sum.ctrs[ix].sum_max;
}
+ if (new_summary)
+ memcpy (summary.ctrs[GCOV_COUNTER_ARCS].histogram,
+ sum.ctrs[GCOV_COUNTER_ARCS].histogram,
+ sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
+ else
+ gcov_histogram_merge (summary.ctrs[GCOV_COUNTER_ARCS].histogram,
+ sum.ctrs[GCOV_COUNTER_ARCS].histogram);
new_summary = 0;
}
else if (GCOV_TAG_IS_COUNTER (tag) && fn_ident)
@@ -268,8 +275,9 @@ read_counts_file (void)
entry->ctr = elt.ctr;
entry->lineno_checksum = lineno_checksum;
entry->cfg_checksum = cfg_checksum;
- entry->summary = summary.ctrs[elt.ctr];
- entry->summary.num = n_counts;
+ if (elt.ctr < GCOV_COUNTERS_SUMMABLE)
+ entry->summary = summary.ctrs[elt.ctr];
+ entry->summary.num = n_counts;
entry->counts = XCNEWVEC (gcov_type, n_counts);
}
else if (entry->lineno_checksum != lineno_checksum
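
The coverage.c hunk above keeps the working-set histogram of the arc counters when reading the counts file: the histogram from the first program summary is copied verbatim, and histograms from later summaries are folded in with gcov_histogram_merge. Below is a toy, bucket-wise model of that copy-then-merge flow only; the Bucket fields and the merge rule are hypothetical simplifications, and the real gcov_bucket_type and merge logic are more involved.

// Toy model of "copy the first summary's histogram, merge the rest".
#include <array>
#include <cstdio>

struct Bucket { unsigned long num; unsigned long sum; };   /* hypothetical fields */
static const int NBUCKETS = 4;                             /* real size is GCOV_HISTOGRAM_SIZE */

static void merge (std::array<Bucket, NBUCKETS> &dst,
                   const std::array<Bucket, NBUCKETS> &src)
{
  for (int i = 0; i < NBUCKETS; i++)
    {
      dst[i].num += src[i].num;
      dst[i].sum += src[i].sum;
    }
}

int main ()
{
  std::array<Bucket, NBUCKETS> summary = {};
  std::array<Bucket, NBUCKETS> run1 = {{ {1, 10}, {0, 0}, {2, 50}, {0, 0} }};
  std::array<Bucket, NBUCKETS> run2 = {{ {0, 0}, {3, 30}, {1, 20}, {0, 0} }};

  summary = run1;            /* first summary: copied verbatim (the memcpy above) */
  merge (summary, run2);     /* later summaries: merged                           */
  printf ("bucket 2: num=%lu sum=%lu\n", summary[2].num, summary[2].sum);
  return 0;
}
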
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index c36ed8888dd..629ab57b0e2 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,27 @@
+2012-09-04 Jason Merrill <jason@redhat.com>
+
+ PR c++/54441
+ * decl.c (reshape_init_class): Handle invalid initializer for
+ 0-length array member.
+
+ * error.c (dump_type_suffix): Correct handling of 0-length arrays.
+
+ PR c++/54420
+ * cp-tree.h (LAMBDANAME_P): Remove.
+ (LAMBDA_TYPE_P): Check CLASSTYPE_LAMBDA_EXPR instead.
+ * cp-lang.c (cxx_dwarf_name): Likewise.
+ * error.c (dump_aggr_type): Likewise.
+ * semantics.c (begin_lambda_type): Set CLASSTYPE_LAMBDA_EXPR sooner.
+
+ PR c++/54198
+ * decl.c (check_default_argument): Set cp_unevaluated_operand
+ around call to perform_implicit_conversion_flags.
+
+ PR c++/54437
+ PR c++/51213
+ * pt.c (fn_type_unification): Call coerce_template_parms before
+ entering substitution context.
+
2012-08-31 Paolo Carlini <paolo.carlini@oracle.com>
Jason Merrill <jason@redhat.com>
diff --git a/gcc/cp/cp-lang.c b/gcc/cp/cp-lang.c
index da7f1e1f7ed..0e90ab2a80d 100644
--- a/gcc/cp/cp-lang.c
+++ b/gcc/cp/cp-lang.c
@@ -115,7 +115,7 @@ cxx_dwarf_name (tree t, int verbosity)
gcc_assert (DECL_P (t));
if (DECL_NAME (t)
- && (ANON_AGGRNAME_P (DECL_NAME (t)) || LAMBDANAME_P (DECL_NAME (t))))
+ && (ANON_AGGRNAME_P (DECL_NAME (t)) || LAMBDA_TYPE_P (t)))
return NULL;
if (verbosity >= 2)
return decl_as_dwarf_string (t,
diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h
index bd57b92c961..fa3d7b0b774 100644
--- a/gcc/cp/cp-tree.h
+++ b/gcc/cp/cp-tree.h
@@ -621,7 +621,7 @@ struct GTY (()) tree_trait_expr {
/* Based off of TYPE_ANONYMOUS_P. */
#define LAMBDA_TYPE_P(NODE) \
- (CLASS_TYPE_P (NODE) && LAMBDANAME_P (TYPE_LINKAGE_IDENTIFIER (NODE)))
+ (CLASS_TYPE_P (NODE) && CLASSTYPE_LAMBDA_EXPR (NODE))
/* Test if FUNCTION_DECL is a lambda function. */
#define LAMBDA_FUNCTION_P(FNDECL) \
@@ -4329,10 +4329,6 @@ extern GTY(()) VEC(tree,gc) *local_classes;
#define LAMBDANAME_PREFIX "__lambda"
#define LAMBDANAME_FORMAT LAMBDANAME_PREFIX "%d"
-#define LAMBDANAME_P(ID_NODE) \
- (!strncmp (IDENTIFIER_POINTER (ID_NODE), \
- LAMBDANAME_PREFIX, \
- sizeof (LAMBDANAME_PREFIX) - 1))
#define UDLIT_OP_ANSI_PREFIX "operator\"\" "
#define UDLIT_OP_ANSI_FORMAT UDLIT_OP_ANSI_PREFIX "%s"
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index 8b94e26ac0c..b665fe8ca19 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -5094,6 +5094,7 @@ reshape_init_class (tree type, reshape_iter *d, bool first_initializer_p,
while (d->cur != d->end)
{
tree field_init;
+ constructor_elt *old_cur = d->cur;
/* Handle designated initializers, as an extension. */
if (d->cur->index)
@@ -5130,6 +5131,15 @@ reshape_init_class (tree type, reshape_iter *d, bool first_initializer_p,
if (field_init == error_mark_node)
return error_mark_node;
+ if (d->cur->index && d->cur == old_cur)
+ {
+ /* This can happen with an invalid initializer for a flexible
+ array member (c++/54441). */
+ if (complain & tf_error)
+ error ("invalid initializer for %q#D", field);
+ return error_mark_node;
+ }
+
CONSTRUCTOR_APPEND_ELT (CONSTRUCTOR_ELTS (new_init), field, field_init);
/* [dcl.init.aggr]
@@ -10575,8 +10585,10 @@ check_default_argument (tree decl, tree arg)
A default argument expression is implicitly converted to the
parameter type. */
+ ++cp_unevaluated_operand;
perform_implicit_conversion_flags (decl_type, arg, tf_warning_or_error,
LOOKUP_NORMAL);
+ --cp_unevaluated_operand;
if (warn_zero_as_null_pointer_constant
&& c_inhibit_evaluation_warnings == 0
diff --git a/gcc/cp/error.c b/gcc/cp/error.c
index 7d60fe00c18..1872d01a249 100644
--- a/gcc/cp/error.c
+++ b/gcc/cp/error.c
@@ -657,7 +657,7 @@ dump_aggr_type (tree t, int flags)
else
pp_printf (pp_base (cxx_pp), M_("<anonymous %s>"), variety);
}
- else if (LAMBDANAME_P (name))
+ else if (LAMBDA_TYPE_P (name))
{
/* A lambda's "type" is essentially its signature. */
pp_string (cxx_pp, M_("<lambda"));
@@ -846,7 +846,9 @@ dump_type_suffix (tree t, int flags)
{
tree dtype = TYPE_DOMAIN (t);
tree max = TYPE_MAX_VALUE (dtype);
- if (host_integerp (max, 0))
+ if (integer_all_onesp (max))
+ pp_character (cxx_pp, '0');
+ else if (host_integerp (max, 0))
pp_wide_integer (cxx_pp, tree_low_cst (max, 0) + 1);
else if (TREE_CODE (max) == MINUS_EXPR)
dump_expr (TREE_OPERAND (max, 0),
diff --git a/gcc/cp/name-lookup.c b/gcc/cp/name-lookup.c
index 22bc5e7c006..0211d4fa3d0 100644
--- a/gcc/cp/name-lookup.c
+++ b/gcc/cp/name-lookup.c
@@ -1998,10 +1998,9 @@ make_anon_name (void)
}
/* This code is practically identical to that for creating
- anonymous names, but is just used for lambdas instead. This is necessary
- because anonymous names are recognized and cannot be passed to template
- functions. */
-/* FIXME is this still necessary? */
+ anonymous names, but is just used for lambdas instead. This isn't really
+ necessary, but it's convenient to avoid treating lambdas like other
+ anonymous types. */
static GTY(()) int lambda_cnt = 0;
diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c
index 4a3942715b8..6f6235c009d 100644
--- a/gcc/cp/pt.c
+++ b/gcc/cp/pt.c
@@ -14591,11 +14591,22 @@ fn_type_unification (tree fn,
static int deduction_depth;
struct pending_template *old_last_pend = last_pending_template;
struct tinst_level *old_error_tinst = last_error_tinst_level;
+ tree tparms = DECL_INNERMOST_TEMPLATE_PARMS (fn);
tree tinst;
tree r = error_mark_node;
- if (excessive_deduction_depth)
- return error_mark_node;
+ /* Adjust any explicit template arguments before entering the
+ substitution context. */
+ if (explicit_targs)
+ {
+ explicit_targs
+ = (coerce_template_parms (tparms, explicit_targs, NULL_TREE,
+ complain,
+ /*require_all_args=*/false,
+ /*use_default_args=*/false));
+ if (explicit_targs == error_mark_node)
+ return error_mark_node;
+ }
/* In C++0x, it's possible to have a function template whose type depends
on itself recursively. This is most obvious with decltype, but can also
@@ -14608,6 +14619,8 @@ fn_type_unification (tree fn,
substitutions back up to the initial one.
This is, of course, not reentrant. */
+ if (excessive_deduction_depth)
+ return error_mark_node;
tinst = build_tree_list (fn, targs);
if (!push_tinst_level (tinst))
{
@@ -14640,23 +14653,10 @@ fn_type_unification (tree fn,
specified template argument values. If a substitution in a
template parameter or in the function type of the function
template results in an invalid type, type deduction fails. */
- tree tparms = DECL_INNERMOST_TEMPLATE_PARMS (fn);
int i, len = TREE_VEC_LENGTH (tparms);
location_t loc = input_location;
- tree converted_args;
bool incomplete = false;
- if (explicit_targs == error_mark_node)
- goto fail;
-
- converted_args
- = (coerce_template_parms (tparms, explicit_targs, NULL_TREE,
- complain,
- /*require_all_args=*/false,
- /*use_default_args=*/false));
- if (converted_args == error_mark_node)
- goto fail;
-
/* Substitute the explicit args into the function type. This is
necessary so that, for instance, explicitly declared function
arguments can match null pointed constants. If we were given
@@ -14667,7 +14667,7 @@ fn_type_unification (tree fn,
{
tree parm = TREE_VALUE (TREE_VEC_ELT (tparms, i));
bool parameter_pack = false;
- tree targ = TREE_VEC_ELT (converted_args, i);
+ tree targ = TREE_VEC_ELT (explicit_targs, i);
/* Dig out the actual parm. */
if (TREE_CODE (parm) == TYPE_DECL
@@ -14705,7 +14705,7 @@ fn_type_unification (tree fn,
processing_template_decl += incomplete;
input_location = DECL_SOURCE_LOCATION (fn);
- fntype = tsubst (TREE_TYPE (fn), converted_args,
+ fntype = tsubst (TREE_TYPE (fn), explicit_targs,
complain | tf_partial, NULL_TREE);
input_location = loc;
processing_template_decl -= incomplete;
@@ -14714,8 +14714,8 @@ fn_type_unification (tree fn,
goto fail;
/* Place the explicitly specified arguments in TARGS. */
- for (i = NUM_TMPL_ARGS (converted_args); i--;)
- TREE_VEC_ELT (targs, i) = TREE_VEC_ELT (converted_args, i);
+ for (i = NUM_TMPL_ARGS (explicit_targs); i--;)
+ TREE_VEC_ELT (targs, i) = TREE_VEC_ELT (explicit_targs, i);
}
/* Never do unification on the 'this' parameter. */
diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c
index 183a78a7e4f..f64246d82d6 100644
--- a/gcc/cp/semantics.c
+++ b/gcc/cp/semantics.c
@@ -8746,6 +8746,10 @@ begin_lambda_type (tree lambda)
/* Designate it as a struct so that we can use aggregate initialization. */
CLASSTYPE_DECLARED_CLASS (type) = false;
+ /* Cross-reference the expression and the type. */
+ LAMBDA_EXPR_CLOSURE (lambda) = type;
+ CLASSTYPE_LAMBDA_EXPR (type) = lambda;
+
/* Clear base types. */
xref_basetypes (type, /*bases=*/NULL_TREE);
@@ -8754,10 +8758,6 @@ begin_lambda_type (tree lambda)
if (type == error_mark_node)
return error_mark_node;
- /* Cross-reference the expression and the type. */
- LAMBDA_EXPR_CLOSURE (lambda) = type;
- CLASSTYPE_LAMBDA_EXPR (type) = lambda;
-
return type;
}
diff --git a/gcc/doc/gimple.texi b/gcc/doc/gimple.texi
index fa31eb00ea0..f4a65478d13 100644
--- a/gcc/doc/gimple.texi
+++ b/gcc/doc/gimple.texi
@@ -2034,21 +2034,12 @@ Set @code{RETVAL} to be the return value for @code{GIMPLE_RETURN} @code{G}.
@subsection @code{GIMPLE_SWITCH}
@cindex @code{GIMPLE_SWITCH}
-@deftypefn {GIMPLE function} gimple gimple_build_switch (unsigned nlabels, @
-tree index, tree default_label, ...)
-Build a @code{GIMPLE_SWITCH} statement. @code{NLABELS} are the number of
-labels excluding the default label. The default label is passed
-in @code{DEFAULT_LABEL}. The rest of the arguments are trees
-representing the labels. Each label is a tree of code
-@code{CASE_LABEL_EXPR}.
-@end deftypefn
-
-@deftypefn {GIMPLE function} gimple gimple_build_switch_vec (tree index, tree @
+@deftypefn {GIMPLE function} gimple gimple_build_switch (tree index, tree @
default_label, @code{VEC}(tree,heap) *args)
-This function is an alternate way of building @code{GIMPLE_SWITCH}
-statements. @code{INDEX} and @code{DEFAULT_LABEL} are as in
-gimple_build_switch. @code{ARGS} is a vector of @code{CASE_LABEL_EXPR} trees
-that contain the labels.
+Build a @code{GIMPLE_SWITCH} statement. @code{INDEX} is the index variable
+to switch on, and @code{DEFAULT_LABEL} represents the default label.
+@code{ARGS} is a vector of @code{CASE_LABEL_EXPR} trees that contain the
+non-default case labels. Each label is a tree of code @code{CASE_LABEL_EXPR}.
@end deftypefn
@deftypefn {GIMPLE function} unsigned gimple_switch_num_labels (gimple g)
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 5dfe7f478f6..973c65c694f 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -458,7 +458,7 @@ Objective-C and Objective-C++ Dialects}.
@xref{Directory Options,,Options for Directory Search}.
@gccoptlist{-B@var{prefix} -I@var{dir} -iplugindir=@var{dir} @gol
-iquote@var{dir} -L@var{dir} -specs=@var{file} -I- @gol
---sysroot=@var{dir}}
+--sysroot=@var{dir} --no-sysroot-suffix}
@item Machine Dependent Options
@xref{Submodel Options,,Hardware Models and Configurations}.
@@ -1239,7 +1239,7 @@ Input files that don't require preprocessing are ignored.
@cindex output file option
@item -o @var{file}
@opindex o
-Place output in file @var{file}. This applies regardless to whatever
+Place output in file @var{file}. This applies to whatever
sort of output is being produced, whether it be an executable file,
an object file, an assembler file or preprocessed C code.
@@ -1335,7 +1335,7 @@ following the original option, such as: @samp{-o output-file}.
@end table
Thus for example to display all the undocumented target-specific
-switches supported by the compiler the following can be used:
+switches supported by the compiler, use:
@smallexample
--help=target,undocumented
@@ -1354,10 +1354,10 @@ The argument to @option{--help=} should not consist solely of inverted
qualifiers.
Combining several classes is possible, although this usually
-restricts the output by so much that there is nothing to display. One
-case where it does work however is when one of the classes is
-@var{target}. So for example to display all the target-specific
-optimization options the following can be used:
+restricts the output so much that there is nothing to display. One
+case where it does work, however, is when one of the classes is
+@var{target}. For example, to display all the target-specific
+optimization options, use:
@smallexample
--help=target,optimizers
@@ -1471,10 +1471,10 @@ with the name @command{gcc}).
@findex g++
@findex c++
However, the use of @command{gcc} does not add the C++ library.
-@command{g++} is a program that calls GCC and treats @samp{.c},
+@command{g++} is a program that calls GCC and automatically specifies linking
+against the C++ library. It treats @samp{.c},
@samp{.h} and @samp{.i} files as C++ source files instead of C source
-files unless @option{-x} is used, and automatically specifies linking
-against the C++ library. This program is also useful when
+files unless @option{-x} is used. This program is also useful when
precompiling a C header file with a @samp{.h} extension for use in C++
compilations. On many systems, @command{g++} is also installed with
the name @command{c++}.
@@ -1555,9 +1555,9 @@ using GNU extensions that do not contradict it. For example,
incompatible with ISO C90, such as the @code{asm} and @code{typeof}
keywords, but not other GNU extensions that do not have a meaning in
ISO C90, such as omitting the middle term of a @code{?:}
-expression. On the other hand, by specifying a GNU dialect of a
-standard, all features the compiler support are enabled, even when
-those features change the meaning of the base standard and some
+expression. On the other hand, when a GNU dialect of a standard is
+specified, all features supported by the compiler are enabled, even when
+those features change the meaning of the base standard. As a result, some
strict-conforming programs may be rejected. The particular standard
is used by @option{-Wpedantic} to identify which features are GNU
extensions given that version of the standard. For example
@@ -1721,7 +1721,7 @@ information about that function to warn about problems with calls to
that function, or to generate more efficient code, even if the
resulting code still contains calls to that function. For example,
warnings are given with @option{-Wformat} for bad calls to
-@code{printf}, when @code{printf} is built in, and @code{strlen} is
+@code{printf} when @code{printf} is built in and @code{strlen} is
known not to modify global memory.
With the @option{-fno-builtin-@var{function}} option
@@ -1742,7 +1742,7 @@ built-in functions selectively when using @option{-fno-builtin} or
@opindex fhosted
@cindex hosted environment
-Assert that compilation takes place in a hosted environment. This implies
+Assert that compilation targets a hosted environment. This implies
@option{-fbuiltin}. A hosted environment is one in which the
entire standard library is available, and in which @code{main} has a return
type of @code{int}. Examples are nearly everything except a kernel.
@@ -1752,7 +1752,7 @@ This is equivalent to @option{-fno-freestanding}.
@opindex ffreestanding
@cindex hosted environment
-Assert that compilation takes place in a freestanding environment. This
+Assert that compilation targets a freestanding environment. This
implies @option{-fno-builtin}. A freestanding environment
is one in which the standard library may not exist, and program startup may
not necessarily be at @code{main}. The most obvious example is an OS kernel.
@@ -1891,7 +1891,7 @@ basic integer types such as @code{int} are signed types.
@cindex C++ options, command-line
@cindex options, C++
This section describes the command-line options that are only meaningful
-for C++ programs; but you can also use most of the GNU compiler options
+for C++ programs. You can also use most of the GNU compiler options
regardless of what language your program is in. For example, you
might compile a file @code{firstClass.C} like this:
@@ -1910,14 +1910,15 @@ Here is a list of options that are @emph{only} for compiling C++ programs:
@item -fabi-version=@var{n}
@opindex fabi-version
-Use version @var{n} of the C++ ABI@. Version 2 is the version of the
-C++ ABI that first appeared in G++ 3.4. Version 1 is the version of
-the C++ ABI that first appeared in G++ 3.2. Version 0 will always be
-the version that conforms most closely to the C++ ABI specification.
-Therefore, the ABI obtained using version 0 will change as ABI bugs
-are fixed.
+Use version @var{n} of the C++ ABI@. The default is version 2.
-The default is version 2.
+Version 0 refers to the version conforming most closely to
+the C++ ABI specification. Therefore, the ABI obtained using version 0
+will change in different versions of G++ as ABI bugs are fixed.
+
+Version 1 is the version of the C++ ABI that first appeared in G++ 3.2.
+
+Version 2 is the version of the C++ ABI that first appeared in G++ 3.4.
Version 3 corrects an error in mangling a constant address as a
template argument.
@@ -2026,7 +2027,7 @@ a @i{for-init-statement} extends to the end of the enclosing scope,
as was the case in old versions of G++, and other (traditional)
implementations of C++.
-The default if neither flag is given to follow the standard,
+If neither flag is given, the default is to follow the standard,
but to allow and give a warning for old-style code that would
otherwise be invalid, or have different behavior.
@@ -2517,7 +2518,7 @@ Warn about violations of the following style guidelines from Scott Meyers'
@itemize @bullet
@item
Item 11: Define a copy constructor and an assignment operator for classes
-with dynamically allocated memory.
+with dynamically-allocated memory.
@item
Item 12: Prefer initialization to assignment in constructors.
@@ -2553,11 +2554,11 @@ to filter out those warnings.
@item -Wstrict-null-sentinel @r{(C++ and Objective-C++ only)}
@opindex Wstrict-null-sentinel
@opindex Wno-strict-null-sentinel
-Warn also about the use of an uncasted @code{NULL} as sentinel. When
+Warn about the use of an uncasted @code{NULL} as sentinel. When
compiling only with GCC this is a valid sentinel, as @code{NULL} is defined
-to @code{__null}. Although it is a null pointer constant not a null pointer,
-it is guaranteed to be of the same size as a pointer. But this use is
-not portable across different compilers.
+to @code{__null}. Although it is a null pointer constant rather than a
+null pointer, it is guaranteed to be of the same size as a pointer.
+But this use is not portable across different compilers.
@item -Wno-non-template-friend @r{(C++ and Objective-C++ only)}
@opindex Wno-non-template-friend
@@ -2656,7 +2657,7 @@ languages themselves. @xref{Standards,,Language Standards
Supported by GCC}, for references.)
This section describes the command-line options that are only meaningful
-for Objective-C and Objective-C++ programs, but you can also use most of
+for Objective-C and Objective-C++ programs. You can also use most of
the language-independent GNU compiler options.
For example, you might compile a file @code{some_class.m} like this:
@@ -2903,7 +2904,7 @@ line.
@item -fdiagnostics-show-location=once
@opindex fdiagnostics-show-location
Only meaningful in line-wrapping mode. Instructs the diagnostic messages
-reporter to emit @emph{once} source location information; that is, in
+reporter to emit source location information @emph{once}; that is, in
case the message is too long to fit on a single physical line and has to
be wrapped, the source location won't be emitted (as prefix) again,
over and over, in subsequent continuation lines. This is the default
@@ -2974,10 +2975,10 @@ Make all warnings into errors.
@opindex Werror=
@opindex Wno-error=
Make the specified warning into an error. The specifier for a warning
-is appended, for example @option{-Werror=switch} turns the warnings
+is appended; for example @option{-Werror=switch} turns the warnings
controlled by @option{-Wswitch} into errors. This switch takes a
negative form, to be used to negate @option{-Werror} for specific
-warnings, for example @option{-Wno-error=switch} makes
+warnings; for example @option{-Wno-error=switch} makes
@option{-Wswitch} warnings not be errors, even when @option{-Werror}
is in effect.
@@ -3000,12 +3001,12 @@ messages.
@end table
-You can request many specific warnings with options beginning
+You can request many specific warnings with options beginning with
@samp{-W}, for example @option{-Wimplicit} to request warnings on
implicit declarations. Each of these specific warning options also
has a negative form beginning @samp{-Wno-} to turn off warnings; for
example, @option{-Wno-implicit}. This manual lists only one of the
-two forms, whichever is not the default. For further,
+two forms, whichever is not the default. For further
language-specific options also refer to @ref{C++ Dialect Options} and
@ref{Objective-C and Objective-C++ Dialect Options}.
@@ -3082,7 +3083,7 @@ Options} and @ref{Objective-C and Objective-C++ Dialect Options}.
-Warray-bounds @r{(only with} @option{-O2}@r{)} @gol
-Wc++11-compat @gol
-Wchar-subscripts @gol
--Wenum-compare @r{(in C/Objc; this is on by default in C++)} @gol
+-Wenum-compare @r{(in C/ObjC; this is on by default in C++)} @gol
-Wimplicit-int @r{(C and Objective-C only)} @gol
-Wimplicit-function-declaration @r{(C and Objective-C only)} @gol
-Wcomment @gol
@@ -3164,7 +3165,7 @@ conditional expression.
@samp{register}.
@item
-(C++ only) A base class is not initialized in a derived class' copy
+(C++ only) A base class is not initialized in a derived class's copy
constructor.
@end itemize
@@ -3481,7 +3482,7 @@ This warning is enabled by @option{-Wall}.
Warn about code that may have undefined semantics because of violations
of sequence point rules in the C and C++ standards.
-The C and C++ standards defines the order in which expressions in a C/C++
+The C and C++ standards define the order in which expressions in a C/C++
program are evaluated in terms of @dfn{sequence points}, which represent
a partial ordering between the execution of parts of the program: those
executed before the sequence point, and those executed after it. These
@@ -3524,12 +3525,12 @@ This warning is enabled by @option{-Wall} for C and C++.
@item -Wreturn-type
@opindex Wreturn-type
@opindex Wno-return-type
-Warn whenever a function is defined with a return-type that defaults
+Warn whenever a function is defined with a return type that defaults
to @code{int}. Also warn about any @code{return} statement with no
-return-value in a function whose return-type is not @code{void}
+return value in a function whose return type is not @code{void}
(falling off the end of the function body is considered returning
without a value), and about a @code{return} statement with an
-expression in a function whose return-type is @code{void}.
+expression in a function whose return type is @code{void}.
For C++, a function without return type always produces a diagnostic
message, even when @option{-Wno-return-type} is specified. The only
@@ -3883,7 +3884,7 @@ Warn about functions that might be candidates for attributes
@code{pure}, @code{const} or @code{noreturn}. The compiler only warns for
functions visible in other compilation units or (in the case of @code{pure} and
@code{const}) if it cannot prove that the function returns normally. A function
-returns normally if it doesn't contain an infinite loop nor returns abnormally
+returns normally if it doesn't contain an infinite loop or return abnormally
by throwing, calling @code{abort()} or trapping. This analysis requires option
@option{-fipa-pure-const}, which is enabled by default at @option{-O} and
higher. Higher optimization levels improve the accuracy of the analysis.
@@ -3983,7 +3984,7 @@ equivalent, and/or problematic constructs that should be avoided.
@item
Macro parameters that appear within string literals in the macro body.
In traditional C macro replacement takes place within string literals,
-but does not in ISO C@.
+but in ISO C it does not.
@item
In traditional C, some preprocessor directives did not exist.
@@ -4160,10 +4161,11 @@ such assumptions.
@item -Wno-pedantic-ms-format @r{(MinGW targets only)}
@opindex Wno-pedantic-ms-format
@opindex Wpedantic-ms-format
-Disables the warnings about non-ISO @code{printf} / @code{scanf} format
-width specifiers @code{I32}, @code{I64}, and @code{I} used on Windows targets
-depending on the MS runtime, when you are using the options @option{-Wformat}
-and @option{-Wpedantic} without gnu-extensions.
+When used in combination with @option{-Wformat}
+and @option{-pedantic} without GNU extensions, this option
+disables the warnings about non-ISO @code{printf} / @code{scanf} format
+width specifiers @code{I32}, @code{I64}, and @code{I} used on Windows targets,
+which depend on the MS runtime.
@item -Wpointer-arith
@opindex Wpointer-arith
@@ -4638,9 +4640,9 @@ Suppress warnings from applying the @samp{offsetof} macro to a non-POD
type. According to the 1998 ISO C++ standard, applying @samp{offsetof}
to a non-POD type is undefined. In existing C++ implementations,
however, @samp{offsetof} typically gives meaningful results even when
-applied to certain kinds of non-POD types. (Such as a simple
+applied to certain kinds of non-POD types (such as a simple
@samp{struct} that fails to be a POD type only by virtue of having a
-constructor.) This flag is for users who are aware that they are
+constructor). This flag is for users who are aware that they are
writing nonportable code and who have deliberately chosen to ignore the
warning about it.
@@ -5433,7 +5435,7 @@ Dump after combining stack adjustments.
@opindex fdump-rtl-cse1
@opindex fdump-rtl-cse2
@option{-fdump-rtl-cse1} and @option{-fdump-rtl-cse2} enable dumping after
-the two common sub-expression elimination passes.
+the two common subexpression elimination passes.
@itemx -fdump-rtl-dce
@opindex fdump-rtl-dce
@@ -5741,7 +5743,7 @@ counters for each function compiled.
@opindex fdump-tree
Control the dumping at various stages of processing the intermediate
language tree to a file. The file name is generated by appending a
-switch specific suffix to the source file name, and the file is
+switch-specific suffix to the source file name, and the file is
created in the same directory as the output file. If the
@samp{-@var{options}} form is used, @var{options} is a list of
@samp{-} separated options which control the details of the dump. Not
@@ -6117,7 +6119,7 @@ Print the mapping from multilib directory names to compiler switches
that enable them. The directory name is separated from the switches by
@samp{;}, and each switch starts with an @samp{@@} instead of the
@samp{-}, without spaces between multiple switches. This is supposed to
-ease shell-processing.
+ease shell processing.
@item -print-multi-os-directory
@opindex print-multi-os-directory
@@ -6138,7 +6140,7 @@ Like @option{-print-file-name}, but searches for a program such as @samp{cpp}.
Same as @option{-print-file-name=libgcc.a}.
This is useful when you use @option{-nostdlib} or @option{-nodefaultlibs}
-but you do want to link with @file{libgcc.a}. You can do
+but you do want to link with @file{libgcc.a}. You can do:
@smallexample
gcc -nostdlib @var{files}@dots{} `gcc -print-libgcc-file-name`
@@ -6353,7 +6355,7 @@ optimizations designed to reduce code size.
@opindex Ofast
Disregard strict standards compliance. @option{-Ofast} enables all
@option{-O3} optimizations. It also enables optimizations that are not
-valid for all standard compliant programs.
+valid for all standard-compliant programs.
It turns on @option{-ffast-math} and the Fortran-specific
@option{-fno-protect-parens} and @option{-fstack-arrays}.
@@ -6627,7 +6629,7 @@ are initialized to zero into BSS@. This can save space in the resulting
code.
This option turns off this behavior because some programs explicitly
-rely on variables going to the data section. E.g., so that the
+rely on variables going to the data section---e.g., so that the
resulting executable can find the beginning of that section and/or make
assumptions based on that.
@@ -6661,7 +6663,7 @@ erroneously read data to propagate within a program.
@item -fthread-jumps
@opindex fthread-jumps
-Perform optimizations where we check to see if a jump branches to a
+Perform optimizations that check to see if a jump branches to a
location where another comparison subsumed by the first is found. If
so, the first branch is redirected to either the destination of the
second branch or a point immediately following it, depending on whether
@@ -6748,13 +6750,13 @@ Not enabled at any optimization level.
@item -fgcse-after-reload
@opindex fgcse-after-reload
When @option{-fgcse-after-reload} is enabled, a redundant load elimination
-pass is performed after reload. The purpose of this pass is to cleanup
+pass is performed after reload. The purpose of this pass is to clean up
redundant spilling.
@item -funsafe-loop-optimizations
@opindex funsafe-loop-optimizations
-If given, the loop optimizer assumes that loop indices do not
-overflow, and that the loops with nontrivial exit condition are not
+This option tells the loop optimizer to assume that loop indices do not
+overflow, and that loops with nontrivial exit condition are not
infinite. This enables a wider range of loop optimizations even if
the loop optimizer itself cannot prove that these assumptions are valid.
If you use @option{-Wunsafe-loop-optimizations}, the compiler warns you
@@ -6762,7 +6764,8 @@ if it finds this kind of loop.
@item -fcrossjumping
@opindex fcrossjumping
-Perform cross-jumping transformation. This transformation unifies equivalent code and save code size. The
+Perform cross-jumping transformation.
+This transformation unifies equivalent code and saves code size. The
resulting code may or may not perform better than without cross-jumping.
Enabled at levels @option{-O2}, @option{-O3}, @option{-Os}.
@@ -6787,7 +6790,7 @@ Enabled by default at @option{-O} and higher.
@item -fif-conversion
@opindex fif-conversion
Attempt to transform conditional jumps into branch-less equivalents. This
-include use of conditional moves, min, max, set flags and abs instructions, and
+includes use of conditional moves, min, max, set flags and abs instructions, and
some tricks doable by standard arithmetics. The use of conditional execution
on chips where it is available is controlled by @code{if-conversion2}.
@@ -6957,13 +6960,13 @@ with @option{-fschedule-insns} or at @option{-O2} or higher.
@item -fsched-pressure
@opindex fsched-pressure
-Enable register pressure sensitive insn scheduling before the register
+Enable register pressure sensitive insn scheduling before register
allocation. This only makes sense when scheduling before register
allocation is enabled, i.e.@: with @option{-fschedule-insns} or at
@option{-O2} or higher. Usage of this option can improve the
generated code and decrease its size by preventing register pressure
-increase above the number of available hard registers and as a
-consequence register spills in the register allocation.
+increase above the number of available hard registers and subsequent
+spills in register allocation.
@item -fsched-spec-load
@opindex fsched-spec-load
@@ -7002,9 +7005,9 @@ and only if @option{-fsched-stalled-insns} is used.
@item -fsched2-use-superblocks
@opindex fsched2-use-superblocks
-When scheduling after register allocation, do use superblock scheduling
-algorithm. Superblock scheduling allows motion across basic block boundaries
-resulting on faster schedules. This option is experimental, as not all machine
+When scheduling after register allocation, use superblock scheduling.
+This allows motion across basic block boundaries,
+resulting in faster schedules. This option is experimental, as not all machine
descriptions used by GCC model the CPU closely enough to avoid unreliable
results from the algorithm.
@@ -7059,9 +7062,9 @@ at @option{-O2} or higher.
@item -freschedule-modulo-scheduled-loops
@opindex freschedule-modulo-scheduled-loops
-The modulo scheduling comes before the traditional scheduling. If a loop
-is modulo scheduled you may want to prevent the later scheduling passes
-from changing its schedule; use this option to control that.
+Modulo scheduling is performed before traditional scheduling. If a loop
+is modulo scheduled, later scheduling passes may change its schedule.
+Use this option to control that behavior.
@item -fselective-scheduling
@opindex fselective-scheduling
@@ -7076,13 +7079,13 @@ scheduling runs instead of the second scheduler pass.
@item -fsel-sched-pipelining
@opindex fsel-sched-pipelining
Enable software pipelining of innermost loops during selective scheduling.
-This option has no effect until one of @option{-fselective-scheduling} or
+This option has no effect unless one of @option{-fselective-scheduling} or
@option{-fselective-scheduling2} is turned on.
@item -fsel-sched-pipelining-outer-loops
@opindex fsel-sched-pipelining-outer-loops
When pipelining loops during selective scheduling, also pipeline outer loops.
-This option has no effect until @option{-fsel-sched-pipelining} is turned on.
+This option has no effect unless @option{-fsel-sched-pipelining} is turned on.
@item -fshrink-wrap
@opindex fshrink-wrap
@@ -7169,7 +7172,7 @@ Enabled by default at @option{-O} and higher.
@item -fipa-reference
@opindex fipa-reference
-Discover which static variables do not escape cannot escape the
+Discover which static variables do not escape the
compilation unit.
Enabled by default at @option{-O} and higher.
@@ -7246,7 +7249,7 @@ default at @option{-O} and higher.
@item -ftree-builtin-call-dce
@opindex ftree-builtin-call-dce
-Perform conditional dead code elimination (DCE) for calls to builtin functions
+Perform conditional dead code elimination (DCE) for calls to built-in functions
that may set @code{errno} but are otherwise side-effect free. This flag is
enabled by default at @option{-O2} and higher if @option{-Os} is not also
specified.
@@ -7589,7 +7592,7 @@ enabled.
@opindex ftracer
Perform tail duplication to enlarge superblock size. This transformation
simplifies the control flow of the function allowing other optimizations to do
-better job.
+a better job.
@item -funroll-loops
@opindex funroll-loops
@@ -7607,14 +7610,14 @@ the loop is entered. This usually makes programs run more slowly.
@item -fsplit-ivs-in-unroller
@opindex fsplit-ivs-in-unroller
-Enables expressing of values of induction variables in later iterations
+Enables expression of values of induction variables in later iterations
of the unrolled loop using the value in the first iteration. This breaks
long dependency chains, thus improving efficiency of the scheduling passes.
-Combination of @option{-fweb} and CSE is often sufficient to obtain the
-same effect. However in cases the loop body is more complicated than
-a single basic block, this is not reliable. It also does not work at all
-on some of the architectures due to restrictions in the CSE pass.
+A combination of @option{-fweb} and CSE is often sufficient to obtain the
+same effect. However, that is not reliable in cases where the loop body
+is more complicated than a single basic block. It also does not work at all
+on some architectures due to restrictions in the CSE pass.
This optimization is enabled by default.
@@ -7706,7 +7709,7 @@ subsections @code{.text.hot} for most frequently executed functions and
the linker so object file format must support named sections and linker must
place them in a reasonable way.
-Also profile feedback must be available in to make this option effective. See
+Also profile feedback must be available to make this option effective. See
@option{-fprofile-arcs} for details.
Enabled at levels @option{-O2}, @option{-O3}, @option{-Os}.
@@ -8084,7 +8087,7 @@ You must prepend a @samp{+} to the command recipe in the parent Makefile
for this to work. This option likely only works if @env{MAKE} is
GNU make.
-This option is disabled by default
+This option is disabled by default.
@item -flto-partition=@var{alg}
@opindex flto-partition
@@ -8162,7 +8165,7 @@ Enabled at levels @option{-O}, @option{-O2}, @option{-O3}, @option{-Os}.
@item -fcprop-registers
@opindex fcprop-registers
After register allocation and post-register allocation instruction splitting,
-we perform a copy-propagation pass to try to reduce scheduling dependencies
+perform a copy-propagation pass to try to reduce scheduling dependencies
and occasionally eliminate the copy.
Enabled at levels @option{-O}, @option{-O2}, @option{-O3}, @option{-Os}.
@@ -8457,8 +8460,8 @@ After running a program compiled with @option{-fprofile-arcs}
(@pxref{Debugging Options,, Options for Debugging Your Program or
@command{gcc}}), you can compile it a second time using
@option{-fbranch-probabilities}, to improve optimizations based on
-the number of times each branch was taken. When the program
-compiled with @option{-fprofile-arcs} exits it saves arc execution
+the number of times each branch was taken. When a program
+compiled with @option{-fprofile-arcs} exits, it saves arc execution
counts to a file called @file{@var{sourcename}.gcda} for each source
file. The information in this data file is very dependent on the
structure of the generated code, so you must use the same source code
@@ -8483,12 +8486,12 @@ Enabled with @option{-fprofile-generate} and @option{-fprofile-use}.
@item -fvpt
@opindex fvpt
-If combined with @option{-fprofile-arcs}, it instructs the compiler to add
-a code to gather information about values of expressions.
+If combined with @option{-fprofile-arcs}, this option instructs the compiler
+to add code to gather information about values of expressions.
With @option{-fbranch-probabilities}, it reads back the data gathered
and actually performs the optimizations based on them.
-Currently the optimizations include specialization of division operation
+Currently the optimizations include specialization of division operations
using the knowledge about the value of the denominator.
@item -frename-registers
@@ -8506,7 +8509,7 @@ Enabled by default with @option{-funroll-loops} and @option{-fpeel-loops}.
@opindex ftracer
Perform tail duplication to enlarge superblock size. This transformation
simplifies the control flow of the function allowing other optimizations to do
-better job.
+a better job.
Enabled with @option{-fprofile-use}.
@@ -8516,7 +8519,7 @@ Unroll loops whose number of iterations can be determined at compile time or
upon entry to the loop. @option{-funroll-loops} implies
@option{-frerun-cse-after-loop}, @option{-fweb} and @option{-frename-registers}.
It also turns on complete loop peeling (i.e.@: complete removal of loops with
-small constant number of iterations). This option makes code larger, and may
+a small constant number of iterations). This option makes code larger, and may
or may not make it run faster.
Enabled with @option{-fprofile-use}.
@@ -8584,7 +8587,7 @@ threading.
@item -fbtr-bb-exclusive
@opindex fbtr-bb-exclusive
When performing branch target register load optimization, don't reuse
-branch target registers in within any basic block.
+branch target registers within any basic block.
@item -fstack-protector
@opindex fstack-protector
@@ -8642,7 +8645,7 @@ tied to the internals of the compiler, and are subject to change
without notice in future releases.
In each case, the @var{value} is an integer. The allowable choices for
-@var{name} are given in the following table:
+@var{name} are:
@table @gcctabopt
@item predictable-branch-outcome
@@ -8650,7 +8653,7 @@ When branch is predicted to be taken with probability lower than this threshold
(in percent), then it is considered well predictable. The default is 10.
@item max-crossjump-edges
-The maximum number of incoming edges to consider for crossjumping.
+The maximum number of incoming edges to consider for cross-jumping.
The algorithm used by @option{-fcrossjumping} is @math{O(N^2)} in
the number of edges incoming to each block. Increasing values mean
more aggressive optimization, making the compilation time increase with
@@ -8658,9 +8661,9 @@ probably small improvement in executable size.
@item min-crossjump-insns
The minimum number of instructions that must be matched at the end
-of two blocks before crossjumping is performed on them. This
+of two blocks before cross-jumping is performed on them. This
value is ignored in the case where all instructions in the block being
-crossjumped from are matched. The default value is 5.
+cross-jumped from are matched. The default value is 5.
@item max-grow-copy-bb-insns
The maximum code size expansion factor when copying basic blocks
@@ -8745,13 +8748,14 @@ the original size.
@item large-unit-insns
The limit specifying large translation unit. Growth caused by inlining of
units larger than this limit is limited by @option{--param inline-unit-growth}.
-For small units this might be too tight (consider unit consisting of function A
-that is inline and B that just calls A three time. If B is small relative to
+For small units this might be too tight.
+For example, consider a unit consisting of function A
+that is inline and B that just calls A three times. If B is small relative to
A, the growth of unit is 300\% and yet such inlining is very sane. For very
-large units consisting of small inlineable functions however the overall unit
+large units consisting of small inlineable functions, however, the overall unit
growth limit is needed to avoid exponential explosion of code size. Thus for
smaller units, the size is increased to @option{--param large-unit-insns}
-before applying @option{--param inline-unit-growth}. The default is 10000
+before applying @option{--param inline-unit-growth}. The default is 10000.
@item inline-unit-growth
Specifies maximal overall growth of the compilation unit caused by inlining.
@@ -8765,7 +8769,7 @@ unit growth to 1.1 times the original size.
@item large-stack-frame
The limit specifying large stack frames. While inlining the algorithm is trying
-to not grow past this limit too much. Default value is 256 bytes.
+to not grow past this limit too much. The default value is 256 bytes.
@item large-stack-frame-growth
Specifies maximal growth of large stack frames caused by inlining in percents.
@@ -8774,21 +8778,22 @@ the original size.
@item max-inline-insns-recursive
@itemx max-inline-insns-recursive-auto
-Specifies maximum number of instructions out-of-line copy of self recursive inline
+Specifies the maximum number of instructions an out-of-line copy of a
+self-recursive inline
function can grow into by performing recursive inlining.
-For functions declared inline @option{--param max-inline-insns-recursive} is
-taken into account. For function not declared inline, recursive inlining
+For functions declared inline, @option{--param max-inline-insns-recursive} is
+taken into account. For functions not declared inline, recursive inlining
happens only when @option{-finline-functions} (included in @option{-O3}) is
enabled and @option{--param max-inline-insns-recursive-auto} is used. The
default value is 450.
@item max-inline-recursive-depth
@itemx max-inline-recursive-depth-auto
-Specifies maximum recursion depth used by the recursive inlining.
+Specifies the maximum recursion depth used for recursive inlining.
-For functions declared inline @option{--param max-inline-recursive-depth} is
-taken into account. For function not declared inline, recursive inlining
+For functions declared inline, @option{--param max-inline-recursive-depth} is
+taken into account. For functions not declared inline, recursive inlining
happens only when @option{-finline-functions} (included in @option{-O3}) is
enabled and @option{--param max-inline-recursive-depth-auto} is used. The
default value is 8.
@@ -8800,20 +8805,21 @@ increasing the prologue size or complexity of function body to other
optimizers.
When profile feedback is available (see @option{-fprofile-generate}) the actual
-recursion depth can be guessed from probability that function will recurse via
-given call expression. This parameter limits inlining only to call expression
-whose probability exceeds given threshold (in percents). The default value is
-10.
+recursion depth can be guessed from the probability that the function recurses via a
+given call expression. This parameter limits inlining only to call expressions
+whose probability exceeds the given threshold (in percents).
+The default value is 10.
@item early-inlining-insns
-Specify growth that early inliner can make. In effect it increases amount of
-inlining for code having large abstraction penalty. The default value is 10.
+Specify growth that the early inliner can make. In effect it increases
+the amount of inlining for code having a large abstraction penalty.
+The default value is 10.
@item max-early-inliner-iterations
@itemx max-early-inliner-iterations
-Limit of iterations of early inliner. This basically bounds number of nested
-indirect calls early inliner can resolve. Deeper chains are still handled by
-late inlining.
+Limit of iterations of the early inliner. This basically bounds
+the number of nested indirect calls the early inliner can resolve.
+Deeper chains are still handled by late inlining.
@item comdat-sharing-probability
@itemx comdat-sharing-probability
@@ -8821,7 +8827,7 @@ Probability (in percent) that C++ inline function with comdat visibility
are shared across multiple compilation units. The default value is 20.
@item min-vect-loop-bound
-The minimum number of iterations under which a loop is not vectorized
+The minimum number of iterations under which loops are not vectorized
when @option{-ftree-vectorize} is used. The number of iterations after
vectorization needs to be greater than the value specified by this option
to allow vectorization. The default value is 0.
@@ -8858,21 +8864,21 @@ The maximum amount of iterations of the pass over the function. This is used to
limit compilation time in tree tail merging. The default value is 2.
@item max-unrolled-insns
-The maximum number of instructions that a loop should have if that loop
-is unrolled, and if the loop is unrolled, it determines how many times
+The maximum number of instructions that a loop may have if it is to be unrolled.
+If a loop is unrolled, this parameter also determines how many times
the loop code is unrolled.
@item max-average-unrolled-insns
The maximum number of instructions biased by probabilities of their execution
-that a loop should have if that loop is unrolled, and if the loop is unrolled,
-it determines how many times the loop code is unrolled.
+that a loop may have if it is to be unrolled. If a loop is unrolled,
+this parameter also determines how many times the loop code is unrolled.
@item max-unroll-times
The maximum number of unrollings of a single loop.
@item max-peeled-insns
-The maximum number of instructions that a loop should have if that loop
-is peeled, and if the loop is peeled, it determines how many times
+The maximum number of instructions that a loop may have if it is to be peeled.
+If a loop is peeled, this parameter also determines how many times
the loop code is peeled.
@item max-peel-times
@@ -8897,19 +8903,19 @@ The maximum number of branches unswitched in a single loop.
The minimum cost of an expensive expression in the loop invariant motion.
@item iv-consider-all-candidates-bound
-Bound on number of candidates for induction variables below that
+Bound on the number of candidates for induction variables, below which
all candidates are considered for each use in induction variable
-optimizations. Only the most relevant candidates are considered
-if there are more candidates, to avoid quadratic time complexity.
+optimizations. If there are more candidates than this,
+only the most relevant ones are considered to avoid quadratic time complexity.
@item iv-max-considered-uses
The induction variable optimizations give up on loops that contain more
induction variable uses.
@item iv-always-prune-cand-set-bound
-If number of candidates in the set is smaller than this value,
-we always try to remove unnecessary ivs from the set during its
-optimization when a new iv is added to the set.
+If the number of candidates in the set is smaller than this value,
+always try to remove unnecessary ivs from the set
+when adding a new one.
@item scev-max-expr-size
Bound on size of expressions used in the scalar evolutions analyzer.
@@ -8958,9 +8964,8 @@ doing loop versioning for alias in the vectorizer. See option
@option{-ftree-vect-loop-version} for more information.
@item max-iterations-to-track
-
-The maximum number of iterations of a loop the brute force algorithm
-for analysis of # of iterations of the loop tries to evaluate.
+The maximum number of iterations of a loop the brute-force algorithm
+for analysis of the number of iterations of the loop tries to evaluate.
@item hot-bb-count-fraction
Select fraction of the maximal count of repetitions of basic block in program
@@ -9023,10 +9028,12 @@ order to make tracer effective.
@item max-cse-path-length
-Maximum number of basic blocks on path that cse considers. The default is 10.
+The maximum number of basic blocks on a path that CSE considers.
+The default is 10.
@item max-cse-insns
-The maximum instructions CSE process before flushing. The default is 1000.
+The maximum number of instructions CSE processes before flushing.
+The default is 1000.
@item ggc-min-expand
@@ -9076,13 +9083,13 @@ increase with probably slightly better performance. The default value is 500.
@item reorder-blocks-duplicate
@itemx reorder-blocks-duplicate-feedback
-Used by basic block reordering pass to decide whether to use unconditional
+Used by the basic block reordering pass to decide whether to use unconditional
branch or duplicate the code on its destination. Code is duplicated when its
estimated size is smaller than this value multiplied by the estimated size of
unconditional jump in the hot spots of the program.
The @option{reorder-block-duplicate-feedback} is used only when profile
-feedback is available and may be set to higher values than
+feedback is available. It may be set to higher values than
@option{reorder-block-duplicate} since information about the hot spots is more
accurate.
@@ -9173,7 +9180,7 @@ and 100 for @option{-Os}, @option{-O2}, and @option{-O3}.
@item prefetch-latency
Estimate on average number of instructions that are executed before
-prefetch finishes. The distance we prefetch ahead is proportional
+prefetch finishes. The distance prefetched ahead is proportional
to this constant. Increasing this number may also lead to less
streams being prefetched (see @option{simultaneous-prefetches}).
@@ -9431,7 +9438,7 @@ options instead.
@item -Xpreprocessor @var{option}
@opindex Xpreprocessor
Pass @var{option} as an option to the preprocessor. You can use this to
-supply system-specific preprocessor options that GCC does not know how to
+supply system-specific preprocessor options that GCC does not
recognize.
If you want to pass an option that takes an argument, you must use
@@ -9470,7 +9477,7 @@ contains commas, it is split into multiple options at the commas.
@item -Xassembler @var{option}
@opindex Xassembler
Pass @var{option} as an option to the assembler. You can use this to
-supply system-specific assembler options that GCC does not know how to
+supply system-specific assembler options that GCC does not
recognize.
If you want to pass an option that takes an argument, you must use
@@ -9554,7 +9561,9 @@ Only the libraries you specify are passed to the linker, and options
specifying linkage of the system libraries, such as @code{-static-libgcc}
or @code{-shared-libgcc}, are ignored.
The standard startup files are used normally, unless @option{-nostartfiles}
-is used. The compiler may generate calls to @code{memcmp},
+is used.
+
+The compiler may generate calls to @code{memcmp},
@code{memset}, @code{memcpy} and @code{memmove}.
These entries are usually resolved by entries in
libc. These entry points should be supplied through some other
@@ -9637,7 +9646,7 @@ is innocuous.}
@opindex shared-libgcc
@opindex static-libgcc
On systems that provide @file{libgcc} as a shared library, these options
-force the use of either the shared or static version respectively.
+force the use of either the shared or static version, respectively.
If no shared version of @file{libgcc} was built when the compiler was
configured, these options have no effect.
@@ -9847,6 +9856,15 @@ for this option. If your linker does not support this option, the
header file aspect of @option{--sysroot} still works, but the
library aspect does not.
+@item --no-sysroot-suffix
+@opindex no-sysroot-suffix
+For some targets, a suffix is added to the root directory specified
+with @option{--sysroot}, depending on the other options used, so that
+headers may, for example, be found in
+@file{@var{dir}/@var{suffix}/usr/include} instead of
+@file{@var{dir}/usr/include}. This option disables the addition of
+such a suffix.
+
@item -I-
@opindex I-
This option has been deprecated. Please use @option{-iquote} instead for
@@ -9945,7 +9963,7 @@ passed to the program @samp{z-compile}, which should be invoked with the
command-line switch @option{-input} and with the result of performing the
@samp{%i} substitution. (See below.)
-As an alternative to providing a spec string, the text that follows a
+As an alternative to providing a spec string, the text following a
suffix directive can be one of the following:
@table @code
@@ -10539,8 +10557,8 @@ That allows code to run on hardware variants that lack these registers.
@item -mprefer-short-insn-regs
@opindex mprefer-short-insn-regs
Preferentially allocate registers that allow short instruction generation.
-This can result in increasesd instruction count, so if this reduces or
-increases code size might vary from case to case.
+This can result in increased instruction count, so this may either reduce or
+increase overall code size.
@item -mbranch-cost=@var{num}
@opindex mbranch-cost
@@ -10554,7 +10572,7 @@ Enable the generation of conditional moves.
@item -mnops=@var{num}
@opindex mnops
-Emit @var{num} nops before every other generated instruction.
+Emit @var{num} NOPs before every other generated instruction.
@item -mno-soft-cmpsf
@opindex mno-soft-cmpsf
@@ -10568,10 +10586,10 @@ software comparisons.
@item -mstack-offset=@var{num}
@opindex mstack-offset
Set the offset between the top of the stack and the stack pointer.
-E.g., a value of 8 means that the eight bytes in the range sp+0@dots{}sp+7
+E.g., a value of 8 means that the eight bytes in the range @code{sp+0@dots{}sp+7}
can be used by leaf functions without stack allocation.
Values other than @samp{8} or @samp{16} are untested and unlikely to work.
-Note also that this option changes the ABI, compiling a program with a
+Note also that this option changes the ABI; compiling a program with a
different stack offset than the libraries have been compiled with
generally does not work.
This option can be useful if you want to evaluate if a different stack
@@ -10725,13 +10743,13 @@ Pass floating-point arguments using the floating-point registers. This is
one of the variants of the APCS@. This option is recommended if the
target hardware has a floating-point unit or if a lot of floating-point
arithmetic is going to be performed by the code. The default is
-@option{-mno-apcs-float}, since integer only code is slightly increased in
-size if @option{-mapcs-float} is used.
+@option{-mno-apcs-float}, since the size of integer-only code is
+slightly increased if @option{-mapcs-float} is used.
@c not currently implemented
@item -mapcs-reentrant
@opindex mapcs-reentrant
-Generate reentrant, position independent code. The default is
+Generate reentrant, position-independent code. The default is
@option{-mno-apcs-reentrant}.
@end ignore
@@ -10906,7 +10924,7 @@ and 64. The default value varies for different toolchains. For the COFF
targeted toolchain the default value is 8. A value of 64 is only allowed
if the underlying ABI supports it.
-Specifying the larger number can produce faster, more efficient code, but
+Specifying a larger number can produce faster, more efficient code, but
can also increase the size of the program. Different values are potentially
incompatible. Code compiled with one value cannot necessarily expect to
work with code or libraries compiled with another value, if they exchange
@@ -11501,6 +11519,7 @@ can be one of @samp{bf512}, @samp{bf514}, @samp{bf516}, @samp{bf518},
@samp{bf542}, @samp{bf544}, @samp{bf547}, @samp{bf548}, @samp{bf549},
@samp{bf542m}, @samp{bf544m}, @samp{bf547m}, @samp{bf548m}, @samp{bf549m},
@samp{bf561}, @samp{bf592}.
+
The optional @var{sirevision} specifies the silicon revision of the target
Blackfin processor. Any workarounds available for the targeted silicon revision
are enabled. If @var{sirevision} is @samp{none}, no workarounds are enabled.
@@ -11513,13 +11532,15 @@ is not defined. If @var{sirevision} is @samp{any}, the
If this optional @var{sirevision} is not used, GCC assumes the latest known
silicon revision of the targeted Blackfin processor.
-Support for @samp{bf561} is incomplete. For @samp{bf561},
-Only the processor macro is defined.
-Without this option, @samp{bf532} is used as the processor by default.
-The corresponding predefined processor macros for @var{cpu} is to
-be defined. And for @samp{bfin-elf} toolchain, this causes the hardware BSP
+GCC defines a preprocessor macro for the specified @var{cpu}.
+For the @samp{bfin-elf} toolchain, this option causes the hardware BSP
provided by libgloss to be linked in if @option{-msim} is not given.
+Without this option, @samp{bf532} is used as the processor by default.
+
+Note that support for @samp{bf561} is incomplete. For @samp{bf561},
+only the preprocessor macro is defined.
+
@item -msim
@opindex msim
Specifies that the program will be run on the simulator. This causes
@@ -11580,7 +11601,7 @@ With a @samp{bfin-elf} target, this option implies @option{-msim}.
@item -mno-id-shared-library
@opindex mno-id-shared-library
-Generate code that doesn't assume ID based shared libraries are being used.
+Generate code that doesn't assume ID-based shared libraries are being used.
This is the default.
@item -mleaf-id-shared-library
@@ -11642,40 +11663,43 @@ not known to bind locally. It has no effect without @option{-mfdpic}.
@item -mmulticore
@opindex mmulticore
-Build standalone application for multicore Blackfin processor. Proper
-start files and link scripts are used to support multicore.
-This option defines @code{__BFIN_MULTICORE}. It can only be used with
-@option{-mcpu=bf561@r{[}-@var{sirevision}@r{]}}. It can be used with
-@option{-mcorea} or @option{-mcoreb}. If it's used without
-@option{-mcorea} or @option{-mcoreb}, single application/dual core
+Build a standalone application for multicore Blackfin processors.
+This option causes proper start files and link scripts supporting
+multicore to be used, and defines the macro @code{__BFIN_MULTICORE}.
+It can only be used with @option{-mcpu=bf561@r{[}-@var{sirevision}@r{]}}.
+
+This option can be used with @option{-mcorea} or @option{-mcoreb}, which
+selects the one-application-per-core programming model. Without
+@option{-mcorea} or @option{-mcoreb}, the single-application/dual-core
programming model is used. In this model, the main function of Core B
-should be named as coreb_main. If it's used with @option{-mcorea} or
-@option{-mcoreb}, one application per core programming model is used.
-If this option is not used, single core application programming
+should be named @code{coreb_main}.
+
+If this option is not used, the single-core application programming
model is used.
@item -mcorea
@opindex mcorea
-Build standalone application for Core A of BF561 when using
-one application per core programming model. Proper start files
-and link scripts are used to support Core A. This option
-defines @code{__BFIN_COREA}. It must be used with @option{-mmulticore}.
+Build a standalone application for Core A of BF561 when using
+the one-application-per-core programming model. Proper start files
+and link scripts are used to support Core A, and the macro
+@code{__BFIN_COREA} is defined.
+This option can only be used in conjunction with @option{-mmulticore}.
@item -mcoreb
@opindex mcoreb
-Build standalone application for Core B of BF561 when using
-one application per core programming model. Proper start files
-and link scripts are used to support Core B. This option
-defines @code{__BFIN_COREB}. When this option is used, coreb_main
-should be used instead of main. It must be used with
-@option{-mmulticore}.
+Build a standalone application for Core B of BF561 when using
+the one-application-per-core programming model. Proper start files
+and link scripts are used to support Core B, and the macro
+@code{__BFIN_COREB} is defined. When this option is used, @code{coreb_main}
+should be used instead of @code{main}.
+This option can only be used in conjunction with @option{-mmulticore}.
@item -msdram
@opindex msdram
-Build standalone application for SDRAM. Proper start files and
-link scripts are used to put the application into SDRAM.
-Loader should initialize SDRAM before loading the application
-into SDRAM. This option defines @code{__BFIN_SDRAM}.
+Build a standalone application for SDRAM. Proper start files and
+link scripts are used to put the application into SDRAM, and the macro
+@code{__BFIN_SDRAM} is defined.
+The loader should initialize SDRAM before loading the application.
@item -micplb
@opindex micplb
@@ -11719,7 +11743,7 @@ pieces of data are @samp{.fardata}, @samp{.far} and @samp{.const}.
@item -msdata=all
@opindex msdata=all
-Put all data, not just small objets, into the sections reserved for
+Put all data, not just small objects, into the sections reserved for
small data, and use addressing relative to the @code{B14} register to
access them.
@@ -11777,7 +11801,7 @@ models where it applies. This option is active by default.
@item -mpdebug
@opindex mpdebug
Enable CRIS-specific verbose debug-related information in the assembly
-code. This option also has the effect to turn off the @samp{#NO_APP}
+code. This option also has the effect of turning off the @samp{#NO_APP}
formatted-code indicator to the assembler at the beginning of the
assembly file.
@@ -11788,7 +11812,7 @@ compare and test instructions before use of condition codes.
@item -mno-side-effects
@opindex mno-side-effects
-Do not emit instructions with side-effects in addressing modes other than
+Do not emit instructions with side effects in addressing modes other than
post-increment.
@item -mstack-align
@@ -11803,8 +11827,8 @@ post-increment.
@opindex mno-data-align
@opindex mconst-align
@opindex mno-const-align
-These options (no-options) arranges (eliminate arrangements) for the
-stack-frame, individual data and constants to be aligned for the maximum
+These options (@samp{no-} options) arrange (eliminate arrangements) for the
+stack frame, individual data and constants to be aligned for the maximum
single data access size for the chosen CPU model. The default is to
arrange for 32-bit alignment. ABI details such as structure layout are
not affected by these options.
@@ -11816,7 +11840,7 @@ not affected by these options.
@opindex m16-bit
@opindex m8-bit
Similar to the stack- data- and const-align options above, these options
-arrange for stack-frame, writable data and constants to all be 32-bit,
+arrange for stack frame, writable data and constants to all be 32-bit,
16-bit or 8-bit aligned. The default is 32-bit alignment.
@item -mno-prologue-epilogue
@@ -11828,7 +11852,7 @@ epilogue which set up the stack frame are omitted and no return
instructions or return sequences are generated in the code. Use this
option only together with visual inspection of the compiled code: no
warnings or errors are generated when call-saved registers must be saved,
-or storage for local variable needs to be allocated.
+or storage for local variables needs to be allocated.
@item -mno-gotplt
@itemx -mgotplt
@@ -11850,7 +11874,7 @@ Legacy no-op option only recognized with the cris-axis-linux-gnu target.
@item -sim
@opindex sim
-This option, recognized for the cris-axis-elf arranges
+This option, recognized for the cris-axis-elf, arranges
to link with input-output functions from a simulator library. Code,
initialized data and zero-initialized data are allocated consecutively.
@@ -12016,8 +12040,8 @@ switch to conform to a non-default data model.
@opindex mfix-and-continue
@opindex ffix-and-continue
@opindex findirect-data
-Generate code suitable for fast turn around development. Needed to
-enable GDB to dynamically load @code{.o} files into already running
+Generate code suitable for fast turnaround development, such as to
+allow GDB to dynamically load @code{.o} files into already-running
programs. @option{-findirect-data} and @option{-ffix-and-continue}
are provided for backwards compatibility.
@@ -12673,9 +12697,9 @@ within the 32-bit address space.
@item -malign-labels
@opindex malign-labels
-Try to align labels to an 8-byte boundary by inserting nops into the
+Try to align labels to an 8-byte boundary by inserting NOPs into the
previous packet. This option only has an effect when VLIW packing
-is enabled. It doesn't create new packets; it merely adds nops to
+is enabled. It doesn't create new packets; it merely adds NOPs to
existing ones.
@item -mlibrary-pic
@@ -12809,7 +12833,7 @@ in a future version.
@opindex moptimize-membar
This switch removes redundant @code{membar} instructions from the
-compiler generated code. It is enabled by default.
+compiler-generated code. It is enabled by default.
@item -mno-optimize-membar
@opindex mno-optimize-membar
@@ -12958,7 +12982,7 @@ Synonyms for @option{-march=1.0}, @option{-march=1.1}, and @option{-march=2.0} r
@item -mbig-switch
@opindex mbig-switch
Generate code suitable for big switch tables. Use this option only if
-the assembler/linker complain about out of range branches within a switch
+the assembler/linker complain about out-of-range branches within a switch
table.
@item -mjump-in-delay
@@ -12997,7 +13021,7 @@ functions.
@item -mfixed-range=@var{register-range}
@opindex mfixed-range
Generate code treating the given register range as fixed registers.
-A fixed register is one that the register allocator can not use. This is
+A fixed register is one that the register allocator cannot use. This is
useful when compiling kernel code. A register range is specified as
two registers separated by a dash. Multiple register ranges can be
specified separated by a comma.
@@ -14473,7 +14497,7 @@ be scheduled.
@subsection LM32 Options
@cindex LM32 options
-These @option{-m} options are defined for the Lattice Mico32 architecture:
+These @option{-m} options are defined for the LatticeMico32 architecture:
@table @gcctabopt
@item -mbarrel-shift-enabled
@@ -14526,8 +14550,7 @@ registers, so there is a tradeoff between GCC's ability to fit the
code into available registers, and the performance penalty of using
memory instead of registers. Note that all modules in a program must
be compiled with the same value for this option. Because of that, you
-must not use this option with the default runtime libraries gcc
-builds.
+must not use this option with GCC's default runtime libraries.
@end table
@@ -14599,7 +14622,7 @@ special instructions to reference them.
@opindex G
@cindex smaller data references
Put global and static objects less than or equal to @var{num} bytes
-into the small data or bss sections instead of the normal data or bss
+into the small data or BSS sections instead of the normal data or BSS
sections. The default value of @var{num} is 8.
The @option{-msdata} option must be set to one of @samp{sdata} or @samp{use}
for this option to have any effect.
@@ -14611,7 +14634,7 @@ generated.
@item -mdebug
@opindex mdebug
-Makes the M32R specific code in the compiler display some statistics
+Makes the M32R-specific code in the compiler display some statistics
that might help in debugging programs.
@item -malign-loops
@@ -14726,7 +14749,7 @@ where the value of @var{family} is given by the table above.
@item -mtune=@var{tune}
@opindex mtune
-Tune the code for a particular microarchitecture, within the
+Tune the code for a particular microarchitecture within the
constraints set by @option{-march} and @option{-mcpu}.
The M680x0 microarchitectures are: @samp{68000}, @samp{68010},
@samp{68020}, @samp{68030}, @samp{68040}, @samp{68060}
@@ -14979,7 +15002,7 @@ the system.
@item -msep-data
Generate code that allows the data segment to be located in a different
-area of memory from the text segment. This allows for execute in place in
+area of memory from the text segment. This allows for execute-in-place in
an environment without virtual memory management. This option implies
@option{-fPIC}.
@@ -14989,11 +15012,11 @@ This is the default.
@item -mid-shared-library
Generate code that supports shared libraries via the library ID method.
-This allows for execute in place and shared libraries in an environment
+This allows for execute-in-place and shared libraries in an environment
without virtual memory management. This option implies @option{-fPIC}.
@item -mno-id-shared-library
-Generate code that doesn't assume ID based shared libraries are being used.
+Generate code that doesn't assume ID-based shared libraries are being used.
This is the default.
@item -mshared-library-id=n
@@ -15062,13 +15085,13 @@ Use the divide instruction. (Enabled by default).
@itemx -mno-relax-immediate
@opindex mrelax-immediate
@opindex mno-relax-immediate
-Allow arbitrary sized immediates in bit operations.
+Allow arbitrary-sized immediates in bit operations.
@item -mwide-bitfields
@itemx -mno-wide-bitfields
@opindex mwide-bitfields
@opindex mno-wide-bitfields
-Always treat bit-fields as int-sized.
+Always treat bit-fields as @code{int}-sized.
@item -m4byte-functions
@itemx -mno-4byte-functions
@@ -15163,7 +15186,7 @@ useful unless you also provide @code{-mminmax}.
@item -mconfig=@var{name}
@opindex mconfig=
-Selects one of the build-in core configurations. Each MeP chip has
+Selects one of the built-in core configurations. Each MeP chip has
one or more modules in it; each module has a core CPU and a variety of
coprocessors, optional instructions, and peripherals. The
@code{MeP-Integrator} tool, not part of GCC, provides these
@@ -15302,7 +15325,7 @@ This option is deprecated. Use @option{-fno-zero-initialized-in-bss} instead.
@item -mcpu=@var{cpu-type}
@opindex mcpu=
-Use features of and schedule code for given CPU.
+Use features of, and schedule code for, the given CPU.
Supported values are in the format @samp{v@var{X}.@var{YY}.@var{Z}},
where @var{X} is a major version, @var{YY} is the minor version, and
@var{Z} is compatibility code. Example values are @samp{v3.00.a},
@@ -15357,7 +15380,7 @@ normal executable (default), uses startup code @file{crt0.o}.
@item xmdstub
for use with Xilinx Microprocessor Debugger (XMD) based
software intrusive debug agent called xmdstub. This uses startup file
-@file{crt1.o} and sets the start address of the program to be 0x800.
+@file{crt1.o} and sets the start address of the program to 0x800.
@item bootstrap
for applications that are loaded using a bootloader.
@@ -15788,8 +15811,8 @@ to generate shorter and faster references to symbolic addresses.
@item -G @var{num}
@opindex G
Put definitions of externally-visible data in a small data section
-if that data is no bigger than @var{num} bytes. GCC can then access
-the data more efficiently; see @option{-mgpopt} for details.
+if that data is no bigger than @var{num} bytes. GCC can then generate
+more efficient accesses to the data; see @option{-mgpopt} for details.
The default @option{-G} option depends on the configuration.
@@ -16046,7 +16069,7 @@ The workarounds for the division errata rely on special functions in
@file{libgcc.a}. At present, these functions are only provided by
the @code{mips64vr*-elf} configurations.
-Other VR4120 errata require a nop to be inserted between certain pairs of
+Other VR4120 errata require a NOP to be inserted between certain pairs of
instructions. These errata are handled by the assembler, not by GCC itself.
@item -mfix-vr4130
@@ -16210,7 +16233,7 @@ resolve the destination at link-time and if the destination is within
range for a direct call.
@option{-mrelax-pic-calls} is the default if GCC was configured to use
-an assembler and a linker that supports the @code{.reloc} assembly
+an assembler and a linker that support the @code{.reloc} assembly
directive and @code{-mexplicit-relocs} is in effect. With
@code{-mno-explicit-relocs}, this optimization can be performed by the
assembler and the linker alone without help from the compiler.
@@ -16766,11 +16789,11 @@ Generate VRSAVE instructions when generating AltiVec code.
@item -mgen-cell-microcode
@opindex mgen-cell-microcode
-Generate Cell microcode instructions
+Generate Cell microcode instructions.
@item -mwarn-cell-microcode
@opindex mwarn-cell-microcode
-Warning when a Cell microcode instruction is going to emitted. An example
+Warn when a Cell microcode instruction is emitted. An example
of a Cell microcode instruction is a variable shift.
@item -msecure-plt
@@ -17192,7 +17215,7 @@ separate groups. Insert @var{number} NOPs to force an insn to a new group.
@item -mcall-sysv
@opindex mcall-sysv
On System V.4 and embedded PowerPC systems compile code using calling
-conventions that adheres to the March 1995 draft of the System V
+conventions that adhere to the March 1995 draft of the System V
Application Binary Interface, PowerPC processor supplement. This is the
default unless you configured GCC using @samp{powerpc-*-eabiaix}.
@@ -17254,7 +17277,7 @@ ABI@.
@item -mabi=no-spe
@opindex mabi=no-spe
-Disable Booke SPE ABI extensions for the current ABI@.
+Disable Book-E SPE ABI extensions for the current ABI@.
@item -mabi=ibmlongdouble
@opindex mabi=ibmlongdouble
@@ -17272,7 +17295,7 @@ This is a PowerPC 32-bit Linux ABI option.
@opindex mno-prototype
On System V.4 and embedded PowerPC systems assume that all calls to
variable argument functions are properly prototyped. Otherwise, the
-compiler must insert an instruction before every non prototyped call to
+compiler must insert an instruction before every non-prototyped call to
set or clear bit 6 of the condition code register (@var{CR}) to
indicate whether floating-point values are passed in the floating-point
registers in case the function takes variable arguments. With
@@ -17319,14 +17342,14 @@ header to indicate that @samp{eabi} extended relocations are used.
@opindex meabi
@opindex mno-eabi
On System V.4 and embedded PowerPC systems do (do not) adhere to the
-Embedded Applications Binary Interface (eabi) which is a set of
+Embedded Applications Binary Interface (EABI), which is a set of
modifications to the System V.4 specifications. Selecting @option{-meabi}
means that the stack is aligned to an 8-byte boundary, a function
-@code{__eabi} is called to from @code{main} to set up the eabi
+@code{__eabi} is called from @code{main} to set up the EABI
environment, and the @option{-msdata} option can use both @code{r2} and
@code{r13} to point to two separate small data areas. Selecting
@option{-mno-eabi} means that the stack is aligned to a 16-byte boundary,
-do not call an initialization function from @code{main}, and the
+no EABI initialization function is called from @code{main}, and the
@option{-msdata} option only uses @code{r13} to point to a single
small data area. The @option{-meabi} option is on by default if you
configured GCC using one of the @samp{powerpc*-*-eabi*} options.
@@ -17388,8 +17411,8 @@ targets. The default value is target-specific.
@cindex smaller data references (PowerPC)
@cindex .sdata/.sdata2 references (PowerPC)
On embedded PowerPC systems, put global and static items less than or
-equal to @var{num} bytes into the small data or bss sections instead of
-the normal data or bss section. By default, @var{num} is 8. The
+equal to @var{num} bytes into the small data or BSS sections instead of
+the normal data or BSS section. By default, @var{num} is 8. The
@option{-G @var{num}} switch is also passed to the linker.
All modules should be compiled with the same @option{-G @var{num}} value.
@@ -17404,9 +17427,9 @@ names in the assembly language output using symbolic forms.
@itemx -mno-longcall
@opindex mlongcall
@opindex mno-longcall
-By default assume that all calls are far away so that a longer more
+By default, assume that all calls are far away so that a longer and more
expensive calling sequence is required. This is required for calls
-further than 32 megabytes (33,554,432 bytes) from the current location.
+farther than 32 megabytes (33,554,432 bytes) from the current location.
A short call is generated if the compiler knows
the call cannot be that far away. This setting can be overridden by
the @code{shortcall} function attribute, or by @code{#pragma
@@ -17463,13 +17486,13 @@ least @option{-funsafe-math-optimizations},
@option{-fno-trapping-math}). Note that while the throughput of the
sequence is generally higher than the throughput of the non-reciprocal
instruction, the precision of the sequence can be decreased by up to 2
-ulp (i.e. the inverse of 1.0 equals 0.99999994) for reciprocal square
+ulp (i.e.@: the inverse of 1.0 equals 0.99999994) for reciprocal square
roots.
@item -mrecip=@var{opt}
@opindex mrecip=opt
-This option allows to control which reciprocal estimate instructions
-may be used. @var{opt} is a comma separated list of options, which may
+This option controls which reciprocal estimate instructions
+may be used. @var{opt} is a comma-separated list of options, which may
be preceded by a @code{!} to invert the option:
@code{all}: enable all estimate instructions,
@code{default}: enable the default instructions, equivalent to @option{-mrecip},
@@ -17635,8 +17658,8 @@ actual register to hold the small data area pointer is chosen.
@itemx -mno-sim
@opindex msim
@opindex mno-sim
-Use the simulator runtime. The default is to use the libgloss board
-specific runtime.
+Use the simulator runtime. The default is to use the libgloss
+board-specific runtime.
@item -mas100-syntax
@itemx -mno-as100-syntax
@@ -17644,8 +17667,7 @@ specific runtime.
@opindex mno-as100-syntax
When generating assembler output use a syntax that is compatible with
Renesas's AS100 assembler. This syntax can also be handled by the GAS
-assembler but it has some restrictions so generating it is not the
-default option.
+assembler, but it has some restrictions, so it is not generated by default.
@item -mmax-constant-size=@var{N}
@opindex mmax-constant-size
@@ -17909,14 +17931,14 @@ arrays. This is generally a bad idea with a limited stack size.
@itemx -mstack-size=@var{stack-size}
@opindex mstack-guard
@opindex mstack-size
-If these options are provided the s390 back end emits additional instructions in
-the function prologue which trigger a trap if the stack size is @var{stack-guard}
-bytes above the @var{stack-size} (remember that the stack on s390 grows downward).
+If these options are provided, the S/390 back end emits additional instructions in
+the function prologue that trigger a trap if the stack size is @var{stack-guard}
+bytes above the @var{stack-size} (remember that the stack on S/390 grows downward).
If the @var{stack-guard} option is omitted the smallest power of 2 larger than
the frame size of the compiled function is chosen.
These options are intended to be used to help debugging stack overflow problems.
The additionally emitted code causes only little overhead and hence can also be
-used in production like systems without greater performance degradation. The given
+used in production-like systems without significant performance degradation. The given
values have to be exact powers of 2 and @var{stack-size} has to be greater than
@var{stack-guard} without exceeding 64k.
In order to be efficient the extra code makes the assumption that the stack starts
@@ -17941,11 +17963,11 @@ Compile code for little-endian mode.
@item -mnhwloop
@opindex mnhwloop
-Disable generate bcnz instruction.
+Disable generation of @code{bcnz} instructions.
@item -muls
@opindex muls
-Enable generate unaligned load and store instruction.
+Enable generation of unaligned load and store instructions.
@item -mmac
@opindex mmac
@@ -18444,7 +18466,7 @@ Specify @option{-mapp-regs} to generate output using the global registers
2 through 4, which the SPARC SVR4 ABI reserves for applications. This
is the default.
-To be fully SVR4 ABI compliant at the cost of some performance loss,
+To be fully SVR4 ABI-compliant at the cost of some performance loss,
specify @option{-mno-app-regs}. You should compile libraries and system
software with this option.
@@ -18827,7 +18849,7 @@ local copy of @code{argv} strings.
@item -mfixed-range=@var{register-range}
@opindex mfixed-range
Generate code treating the given register range as fixed registers.
-A fixed register is one that the register allocator can not use. This is
+A fixed register is one that the register allocator cannot use. This is
useful when compiling kernel code. A register range is specified as
two registers separated by a dash. Multiple register ranges can be
specified separated by a comma.
@@ -18838,7 +18860,7 @@ specified separated by a comma.
@opindex mea64
Compile code assuming that pointers to the PPU address space accessed
via the @code{__ea} named address space qualifier are either 32 or 64
-bits wide. The default is 32 bits. As this is an ABI changing option,
+bits wide. The default is 32 bits. As this is an ABI-changing option,
all object code in an executable must be compiled with the same setting.
@item -maddress-space-conversion
@@ -19794,7 +19816,7 @@ using universal character names.
@opindex fstack-check
Generate code to verify that you do not go beyond the boundary of the
stack. You should specify this flag if you are running in an
-environment with multiple threads, but only rarely need to specify it in
+environment with multiple threads, but you only rarely need to specify it in
a single-threaded environment since stack overflow is automatically
detected on nearly all systems if there is only one stack.
@@ -19822,7 +19844,7 @@ a warning is issued by the compiler.
@item
Inefficiency: because of both the modified allocation strategy and the
-generic implementation, the performances of the code are hampered.
+generic implementation, code performance is hampered.
@end enumerate
Note that old-style stack checking is also the fallback method for
@@ -19910,7 +19932,7 @@ Shared Libraries'' by Ulrich Drepper (which can be found at
@w{@uref{http://people.redhat.com/~drepper/}})---however a superior
solution made possible by this option to marking things hidden when
the default is public is to make the default hidden and mark things
-public. This is the norm with DLL's on Windows and with @option{-fvisibility=hidden}
+public. This is the norm with DLLs on Windows; with @option{-fvisibility=hidden}
and @code{__attribute__ ((visibility("default")))} instead of
@code{__declspec(dllexport)} you get almost identical semantics with
identical syntax. This is a great boon to those working with
@@ -19960,7 +19982,7 @@ structure fields, although the compiler usually honors those types
anyway) should use a single access of the width of the
field's type, aligned to a natural alignment if possible. For
example, targets with memory-mapped peripheral registers might require
-all such accesses to be 16 bits wide; with this flag the user could
+all such accesses to be 16 bits wide; with this flag you can
declare all peripheral bit-fields as @code{unsigned short} (assuming short
is 16 bits on these targets) to force GCC to use 16-bit accesses
instead of, perhaps, a more efficient 32-bit access.
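
As a concrete illustration of the bit-field advice above, here is a small sketch that declares a memory-mapped peripheral's status word with unsigned short bit-fields so that, with the flag in effect, each field access is performed as a single 16-bit load or store. The register layout and the names are invented for the example.

    /* Hypothetical 16-bit status register; the layout is made up for
       illustration.  With the flag described above, accesses to these
       volatile bit-fields use the declared 16-bit type.  */
    struct periph_status
    {
      volatile unsigned short ready : 1;
      volatile unsigned short error : 1;
      volatile unsigned short count : 14;
    };

    unsigned
    read_count (struct periph_status *reg)
    {
      /* Read the field with a single 16-bit access rather than a wider one.  */
      return reg->count;
    }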
diff --git a/gcc/except.c b/gcc/except.c
index 01745125a8e..ae5a11fdaa0 100644
--- a/gcc/except.c
+++ b/gcc/except.c
@@ -1243,7 +1243,7 @@ sjlj_emit_dispatch_table (rtx dispatch_label, int num_dispatch)
eh_region r;
edge e;
int i, disp_index;
- gimple switch_stmt;
+ VEC(tree, heap) *dispatch_labels = NULL;
fc = crtl->eh.sjlj_fc;
@@ -1289,17 +1289,8 @@ sjlj_emit_dispatch_table (rtx dispatch_label, int num_dispatch)
/* If there's exactly one call site in the function, don't bother
generating a switch statement. */
- switch_stmt = NULL;
if (num_dispatch > 1)
- {
- tree disp;
-
- mem = adjust_address (fc, TYPE_MODE (integer_type_node),
- sjlj_fc_call_site_ofs);
- disp = make_tree (integer_type_node, mem);
-
- switch_stmt = gimple_build_switch_nlabels (num_dispatch, disp, NULL);
- }
+ dispatch_labels = VEC_alloc (tree, heap, num_dispatch);
for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
if (lp && lp->post_landing_pad)
@@ -1317,8 +1308,7 @@ sjlj_emit_dispatch_table (rtx dispatch_label, int num_dispatch)
t_label = create_artificial_label (UNKNOWN_LOCATION);
t = build_int_cst (integer_type_node, disp_index);
case_elt = build_case_label (t, NULL, t_label);
- gimple_switch_set_label (switch_stmt, disp_index, case_elt);
-
+ VEC_quick_push (tree, dispatch_labels, case_elt);
label = label_rtx (t_label);
}
else
@@ -1371,7 +1361,16 @@ sjlj_emit_dispatch_table (rtx dispatch_label, int num_dispatch)
if (num_dispatch > 1)
{
+ gimple switch_stmt;
+ tree default_label = create_artificial_label (UNKNOWN_LOCATION);
+ rtx disp = adjust_address (fc, TYPE_MODE (integer_type_node),
+ sjlj_fc_call_site_ofs);
+ switch_stmt = gimple_build_switch (make_tree (integer_type_node, disp),
+ build_case_label (NULL, NULL,
+ default_label),
+ dispatch_labels);
expand_case (switch_stmt);
+ emit_label (label_rtx (default_label));
expand_builtin_trap ();
}
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index b386bb2c606..2bf51797847 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -14196,16 +14196,6 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
return op1;
}
- if ((TREE_CODE (arg0) == VECTOR_CST
- || TREE_CODE (arg0) == CONSTRUCTOR)
- && (TREE_CODE (arg1) == VECTOR_CST
- || TREE_CODE (arg1) == CONSTRUCTOR))
- {
- t = fold_vec_perm (type, arg0, arg1, sel);
- if (t != NULL_TREE)
- return t;
- }
-
if (all_in_vec0)
op1 = op0;
else if (all_in_vec1)
@@ -14216,6 +14206,16 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
need_mask_canon = true;
}
+ if ((TREE_CODE (op0) == VECTOR_CST
+ || TREE_CODE (op0) == CONSTRUCTOR)
+ && (TREE_CODE (op1) == VECTOR_CST
+ || TREE_CODE (op1) == CONSTRUCTOR))
+ {
+ t = fold_vec_perm (type, op0, op1, sel);
+ if (t != NULL_TREE)
+ return t;
+ }
+
if (op0 == op1 && !single_arg)
changed = true;
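
The reordered hunk above makes the constant folding of VEC_PERM_EXPR run on the canonicalized operands op0/op1, after single-input selectors have been rewritten, instead of on the original arg0 and arg1. For orientation, here is a minimal example, using GCC's generic vector extensions, of the kind of expression fold_vec_perm can reduce at compile time; the function name and the spelled-out result are illustrative, not taken from the patch.

    /* __builtin_shuffle on constant vectors lowers to a VEC_PERM_EXPR
       with constant operands and a constant selector, which is the
       case fold_vec_perm handles.  */
    typedef int v4si __attribute__ ((vector_size (16)));

    v4si
    shuffle_constants (void)
    {
      v4si a = { 1, 2, 3, 4 };
      v4si b = { 5, 6, 7, 8 };
      /* Selector values 0-3 index into A, 4-7 index into B.  */
      v4si sel = { 0, 4, 1, 5 };
      /* Foldable to the constant vector { 1, 5, 2, 6 }.  */
      return __builtin_shuffle (a, b, sel);
    }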
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index 6032a1a6a3e..dbafc449cc9 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,25 @@
+2012-09-04 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/54435
+ PR fortran/54443
+ * match.c (gfc_match_select_type): Make sure to only access CLASS_DATA
+ for BT_CLASS.
+
+2012-09-03 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/54467
+ * class.c (gfc_find_derived_vtab): Fix disabling of _final
+ by continuing to generate normal type-bound procedures.
+
+2012-09-03 Tobias Burnus <burnus@net-b.de>
+
+ * class.c (gfc_find_derived_vtab): Disable ABI-breaking
+ generation of the "_final" subroutine for now.
+
+2012-09-03 Tobias Burnus <burnus@net-b.de>
+
+ * class.c (finalize_component): Fixes to the comment.
+
2012-09-03 Alessandro Fanfarillo <fanfarillo.gcc@gmail.com>
Tobias Burnus <burnus@net-b.de>
diff --git a/gcc/fortran/class.c b/gcc/fortran/class.c
index 38a4ddb5302..dca2cfc1cda 100644
--- a/gcc/fortran/class.c
+++ b/gcc/fortran/class.c
@@ -720,8 +720,8 @@ has_finalizer_component (gfc_symbol *derived)
/* Call DEALLOCATE for the passed component if it is allocatable, if it is
neither allocatable nor a pointer but has a finalizer, call it. If it
- is a nonpointer component with allocatable or finalizes components, walk
- them. Either of the is required; other nonallocatables and pointers aren't
+ is a nonpointer component with allocatable components or has finalizers, walk
+ them. Either of them is required; other nonallocatables and pointers aren't
handled gracefully.
Note: If the component is allocatable, the DEALLOCATE handling takes care
of calling the appropriate finalizers, coarray deregistering, and
@@ -1624,7 +1624,9 @@ gfc_find_derived_vtab (gfc_symbol *derived)
components and the calls to finalization subroutines.
Note: The actual wrapper function can only be generated
at resolution time. */
-
+ /* FIXME: Enable ABI-breaking "_final" generation. */
+ if (0)
+ {
if (gfc_add_component (vtype, "_final", &c) == FAILURE)
goto cleanup;
c->attr.proc_pointer = 1;
@@ -1632,10 +1634,11 @@ gfc_find_derived_vtab (gfc_symbol *derived)
c->tb = XCNEW (gfc_typebound_proc);
c->tb->ppc = 1;
generate_finalization_wrapper (derived, ns, tname, c);
+ }
/* Add procedure pointers for type-bound procedures. */
add_procs_to_declared_vtab (derived, vtype);
- }
+ }
have_vtype:
vtab->ts.u.derived = vtype;
diff --git a/gcc/fortran/match.c b/gcc/fortran/match.c
index 4c713a5d6cb..cf85d527749 100644
--- a/gcc/fortran/match.c
+++ b/gcc/fortran/match.c
@@ -5368,10 +5368,10 @@ gfc_match_select_type (void)
array, which can have a reference, from other expressions that
have references, such as derived type components, and are not
allowed by the standard.
- TODO; see is it is sufficient to exclude component and substring
+ TODO: see if it is sufficient to exclude component and substring
references. */
class_array = expr1->expr_type == EXPR_VARIABLE
- && expr1->ts.type != BT_UNKNOWN
+ && expr1->ts.type == BT_CLASS
&& CLASS_DATA (expr1)
&& (strcmp (CLASS_DATA (expr1)->name, "_data") == 0)
&& (CLASS_DATA (expr1)->attr.dimension
diff --git a/gcc/gcc.c b/gcc/gcc.c
index 815747eaf0f..5f68d5978e3 100644
--- a/gcc/gcc.c
+++ b/gcc/gcc.c
@@ -3249,6 +3249,7 @@ driver_handle_option (struct gcc_options *opts,
add_linker_option ("--target-help", 13);
break;
+ case OPT__no_sysroot_suffix:
case OPT_pass_exit_codes:
case OPT_print_search_dirs:
case OPT_print_file_name_:
@@ -6340,6 +6341,7 @@ main (int argc, char **argv)
/* Process sysroot_suffix_spec. */
if (*sysroot_suffix_spec != 0
+ && !no_sysroot_suffix
&& do_spec_2 (sysroot_suffix_spec) == 0)
{
if (VEC_length (const_char_p, argbuf) > 1)
@@ -6363,6 +6365,7 @@ main (int argc, char **argv)
/* Process sysroot_hdrs_suffix_spec. */
if (*sysroot_hdrs_suffix_spec != 0
+ && !no_sysroot_suffix
&& do_spec_2 (sysroot_hdrs_suffix_spec) == 0)
{
if (VEC_length (const_char_p, argbuf) > 1)
diff --git a/gcc/gcov-dump.c b/gcc/gcov-dump.c
index 59b8380f299..fb01108946c 100644
--- a/gcc/gcov-dump.c
+++ b/gcc/gcov-dump.c
@@ -447,7 +447,8 @@ tag_summary (const char *filename ATTRIBUTE_UNUSED,
unsigned tag ATTRIBUTE_UNUSED, unsigned length ATTRIBUTE_UNUSED)
{
struct gcov_summary summary;
- unsigned ix;
+ unsigned ix, h_ix;
+ gcov_bucket_type *histo_bucket;
gcov_read_summary (&summary);
printf (" checksum=0x%08x", summary.checksum);
@@ -465,5 +466,24 @@ tag_summary (const char *filename ATTRIBUTE_UNUSED,
(HOST_WIDEST_INT)summary.ctrs[ix].run_max);
printf (", sum_max=" HOST_WIDEST_INT_PRINT_DEC,
(HOST_WIDEST_INT)summary.ctrs[ix].sum_max);
+ if (ix != GCOV_COUNTER_ARCS)
+ continue;
+ printf ("\n");
+ print_prefix (filename, 0, 0);
+ printf ("\t\tcounter histogram:");
+ for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
+ {
+ histo_bucket = &summary.ctrs[ix].histogram[h_ix];
+ if (!histo_bucket->num_counters)
+ continue;
+ printf ("\n");
+ print_prefix (filename, 0, 0);
+ printf ("\t\t%d: num counts=%u, min counter="
+ HOST_WIDEST_INT_PRINT_DEC ", cum_counter="
+ HOST_WIDEST_INT_PRINT_DEC,
+ h_ix, histo_bucket->num_counters,
+ (HOST_WIDEST_INT)histo_bucket->min_value,
+ (HOST_WIDEST_INT)histo_bucket->cum_value);
+ }
}
}
diff --git a/gcc/gcov-io.c b/gcc/gcov-io.c
index 37c1c3e3508..d64fb42c3c2 100644
--- a/gcc/gcov-io.c
+++ b/gcc/gcov-io.c
@@ -368,10 +368,25 @@ gcov_write_tag_length (gcov_unsigned_t tag, gcov_unsigned_t length)
GCOV_LINKAGE void
gcov_write_summary (gcov_unsigned_t tag, const struct gcov_summary *summary)
{
- unsigned ix;
+ unsigned ix, h_ix, bv_ix, h_cnt = 0;
const struct gcov_ctr_summary *csum;
-
- gcov_write_tag_length (tag, GCOV_TAG_SUMMARY_LENGTH);
+ unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];
+
+ /* Count number of non-zero histogram entries, and fill in a bit vector
+ of non-zero indices. The histogram is only currently computed for arc
+ counters. */
+ for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
+ histo_bitvector[bv_ix] = 0;
+ csum = &summary->ctrs[GCOV_COUNTER_ARCS];
+ for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
+ {
+ if (csum->histogram[h_ix].num_counters > 0)
+ {
+ histo_bitvector[h_ix / 32] |= 1 << (h_ix % 32);
+ h_cnt++;
+ }
+ }
+ gcov_write_tag_length (tag, GCOV_TAG_SUMMARY_LENGTH(h_cnt));
gcov_write_unsigned (summary->checksum);
for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
{
@@ -380,6 +395,22 @@ gcov_write_summary (gcov_unsigned_t tag, const struct gcov_summary *summary)
gcov_write_counter (csum->sum_all);
gcov_write_counter (csum->run_max);
gcov_write_counter (csum->sum_max);
+ if (ix != GCOV_COUNTER_ARCS)
+ {
+ for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
+ gcov_write_unsigned (0);
+ continue;
+ }
+ for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
+ gcov_write_unsigned (histo_bitvector[bv_ix]);
+ for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
+ {
+ if (!csum->histogram[h_ix].num_counters)
+ continue;
+ gcov_write_unsigned (csum->histogram[h_ix].num_counters);
+ gcov_write_counter (csum->histogram[h_ix].min_value);
+ gcov_write_counter (csum->histogram[h_ix].cum_value);
+ }
}
}
#endif /* IN_LIBGCOV */
@@ -488,8 +519,10 @@ gcov_read_string (void)
GCOV_LINKAGE void
gcov_read_summary (struct gcov_summary *summary)
{
- unsigned ix;
+ unsigned ix, h_ix, bv_ix, h_cnt = 0;
struct gcov_ctr_summary *csum;
+ unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];
+ unsigned cur_bitvector;
summary->checksum = gcov_read_unsigned ();
for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
@@ -499,6 +532,43 @@ gcov_read_summary (struct gcov_summary *summary)
csum->sum_all = gcov_read_counter ();
csum->run_max = gcov_read_counter ();
csum->sum_max = gcov_read_counter ();
+ memset (csum->histogram, 0,
+ sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
+ for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
+ {
+ histo_bitvector[bv_ix] = gcov_read_unsigned ();
+ h_cnt += __builtin_popcountll (histo_bitvector[bv_ix]);
+ }
+ bv_ix = 0;
+ h_ix = 0;
+ cur_bitvector = 0;
+ while (h_cnt--)
+ {
+ /* Find the index corresponding to the next entry we will read in.
+ First find the next non-zero bitvector and re-initialize
+ the histogram index accordingly, then right shift and increment
+ the index until we find a set bit. */
+ while (!cur_bitvector)
+ {
+ h_ix = bv_ix * 32;
+ cur_bitvector = histo_bitvector[bv_ix++];
+ gcc_assert(bv_ix <= GCOV_HISTOGRAM_BITVECTOR_SIZE);
+ }
+ while (!(cur_bitvector & 0x1))
+ {
+ h_ix++;
+ cur_bitvector >>= 1;
+ }
+ gcc_assert(h_ix < GCOV_HISTOGRAM_SIZE);
+
+ csum->histogram[h_ix].num_counters = gcov_read_unsigned ();
+ csum->histogram[h_ix].min_value = gcov_read_counter ();
+ csum->histogram[h_ix].cum_value = gcov_read_counter ();
+ /* Shift off the index we are done with and increment to the
+ corresponding next histogram entry. */
+ cur_bitvector >>= 1;
+ h_ix++;
+ }
}
}
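
In the new on-disk summary the per-bucket records are preceded by a bit vector marking which histogram buckets are non-empty, so only those buckets are written and the reader can tell how many records follow by counting set bits. A minimal standalone sketch of that encoding follows; the 252-bucket size and the 8-word bit vector mirror the gcov-io.h constants, which are an assumption here since they are not part of this hunk.

    /* Sketch of the bitvector scheme used by gcov_write_summary and
       gcov_read_summary: only non-empty buckets are emitted, and one
       bit per bucket records which ones they are.  Sizes are assumed
       to match gcov-io.h (252 buckets, 8 32-bit words).  */
    #include <assert.h>
    #include <string.h>

    #define HISTOGRAM_SIZE 252
    #define BITVECTOR_SIZE ((HISTOGRAM_SIZE + 31) / 32)

    struct bucket { unsigned num_counters; unsigned long long min_value, cum_value; };

    int
    main (void)
    {
      struct bucket histo[HISTOGRAM_SIZE];
      unsigned bv[BITVECTOR_SIZE];
      unsigned h_ix, bv_ix, written = 0, expected = 0;

      memset (histo, 0, sizeof histo);
      memset (bv, 0, sizeof bv);
      histo[3].num_counters = 2;      /* two sparse non-empty buckets */
      histo[200].num_counters = 1;

      /* Writer side: set one bit per non-empty bucket.  */
      for (h_ix = 0; h_ix < HISTOGRAM_SIZE; h_ix++)
        if (histo[h_ix].num_counters)
          {
            bv[h_ix / 32] |= 1u << (h_ix % 32);
            written++;
          }

      /* Reader side: the popcount of the bit vector tells how many
         bucket records follow in the stream.  */
      for (bv_ix = 0; bv_ix < BITVECTOR_SIZE; bv_ix++)
        expected += __builtin_popcount (bv[bv_ix]);

      assert (written == 2 && expected == written);
      return 0;
    }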
@@ -550,3 +620,184 @@ gcov_time (void)
return status.st_mtime;
}
#endif /* IN_GCOV */
+
+#if IN_LIBGCOV || !IN_GCOV
+/* Determine the index into histogram for VALUE. */
+
+static unsigned
+gcov_histo_index(gcov_type value)
+{
+ gcov_type_unsigned v = (gcov_type_unsigned)value;
+ unsigned r = 0;
+ unsigned prev2bits = 0;
+
+ /* Find index into log2 scale histogram, where each of the log2
+ sized buckets is divided into 4 linear sub-buckets for better
+ focus in the higher buckets. */
+
+ /* Find the place of the most-significant bit set. */
+ if (v > 0)
+ r = 63 - __builtin_clzll (v);
+
+ /* If at most the 2 least significant bits are set (value is
+ 0 - 3) then that value is our index into the lowest set of
+ four buckets. */
+ if (r < 2)
+ return (unsigned)value;
+
+ gcc_assert (r < 64);
+
+ /* Find the two next most significant bits to determine which
+ of the four linear sub-buckets to select. */
+ prev2bits = (v >> (r - 2)) & 0x3;
+ /* Finally, compose the final bucket index from the log2 index and
+ the next 2 bits. The minimum r value at this point is 2 since we
+ returned above if r was 2 or more, so the minimum bucket at this
+ point is 4. */
+ return (r - 1) * 4 + prev2bits;
+}
+
+/* Merge SRC_HISTO into TGT_HISTO. The counters are assumed to be in
+ the same relative order in both histograms, and are matched up
+ and merged in reverse order. Each counter is assigned an equal portion of
+ its entry's original cumulative counter value when computing the
+ new merged cum_value. */
+
+static void gcov_histogram_merge(gcov_bucket_type *tgt_histo,
+ gcov_bucket_type *src_histo)
+{
+ int src_i, tgt_i, tmp_i = 0;
+ unsigned src_num, tgt_num, merge_num;
+ gcov_type src_cum, tgt_cum, merge_src_cum, merge_tgt_cum, merge_cum;
+ gcov_type merge_min;
+ gcov_bucket_type tmp_histo[GCOV_HISTOGRAM_SIZE];
+ int src_done = 0;
+
+ memset(tmp_histo, 0, sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
+
+ /* Assume that the counters are in the same relative order in both
+ histograms. Walk the histograms from largest to smallest entry,
+ matching up and combining counters in order. */
+ src_num = 0;
+ src_cum = 0;
+ src_i = GCOV_HISTOGRAM_SIZE - 1;
+ for (tgt_i = GCOV_HISTOGRAM_SIZE - 1; tgt_i >= 0 && !src_done; tgt_i--)
+ {
+ tgt_num = tgt_histo[tgt_i].num_counters;
+ tgt_cum = tgt_histo[tgt_i].cum_value;
+ /* Keep going until all of the target histogram's counters at this
+ position have been matched and merged with counters from the
+ source histogram. */
+ while (tgt_num > 0 && !src_done)
+ {
+ /* If this is either the first time through this loop or we just
+ exhausted the previous non-zero source histogram entry, look
+ for the next non-zero source histogram entry. */
+ if (!src_num)
+ {
+ /* Locate the next non-zero entry. */
+ while (src_i >= 0 && !src_histo[src_i].num_counters)
+ src_i--;
+ /* If source histogram has fewer counters, then just copy over the
+ remaining target counters and quit. */
+ if (src_i < 0)
+ {
+ tmp_histo[tgt_i].num_counters += tgt_num;
+ tmp_histo[tgt_i].cum_value += tgt_cum;
+ if (!tmp_histo[tgt_i].min_value ||
+ tgt_histo[tgt_i].min_value < tmp_histo[tgt_i].min_value)
+ tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
+ while (--tgt_i >= 0)
+ {
+ tmp_histo[tgt_i].num_counters
+ += tgt_histo[tgt_i].num_counters;
+ tmp_histo[tgt_i].cum_value += tgt_histo[tgt_i].cum_value;
+ if (!tmp_histo[tgt_i].min_value ||
+ tgt_histo[tgt_i].min_value
+ < tmp_histo[tgt_i].min_value)
+ tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
+ }
+
+ src_done = 1;
+ break;
+ }
+
+ src_num = src_histo[src_i].num_counters;
+ src_cum = src_histo[src_i].cum_value;
+ }
+
+ /* The number of counters to merge on this pass is the minimum
+ of the remaining counters from the current target and source
+ histogram entries. */
+ merge_num = tgt_num;
+ if (src_num < merge_num)
+ merge_num = src_num;
+
+ /* The merged min_value is the sum of the min_values from target
+ and source. */
+ merge_min = tgt_histo[tgt_i].min_value + src_histo[src_i].min_value;
+
+ /* Compute the portion of source and target entries' cum_value
+ that will be apportioned to the counters being merged.
+ The total remaining cum_value from each entry is divided
+ equally among the counters from that histogram entry if we
+ are not merging all of them. */
+ merge_src_cum = src_cum;
+ if (merge_num < src_num)
+ merge_src_cum = merge_num * src_cum / src_num;
+ merge_tgt_cum = tgt_cum;
+ if (merge_num < tgt_num)
+ merge_tgt_cum = merge_num * tgt_cum / tgt_num;
+ /* The merged cum_value is the sum of the source and target
+ components. */
+ merge_cum = merge_src_cum + merge_tgt_cum;
+
+ /* Update the remaining number of counters and cum_value left
+ to be merged from this source and target entry. */
+ src_cum -= merge_src_cum;
+ tgt_cum -= merge_tgt_cum;
+ src_num -= merge_num;
+ tgt_num -= merge_num;
+
+ /* The merged counters get placed in the new merged histogram
+ at the entry for the merged min_value. */
+ tmp_i = gcov_histo_index(merge_min);
+ gcc_assert (tmp_i < GCOV_HISTOGRAM_SIZE);
+ tmp_histo[tmp_i].num_counters += merge_num;
+ tmp_histo[tmp_i].cum_value += merge_cum;
+ if (!tmp_histo[tmp_i].min_value ||
+ merge_min < tmp_histo[tmp_i].min_value)
+ tmp_histo[tmp_i].min_value = merge_min;
+
+ /* Ensure the search for the next non-zero src_histo entry starts
+ at the next smallest histogram bucket. */
+ if (!src_num)
+ src_i--;
+ }
+ }
+
+ gcc_assert (tgt_i < 0);
+
+ /* In the case where there were more counters in the source histogram,
+ accumulate the remaining unmerged cumulative counter values. Add
+ those to the smallest non-zero target histogram entry. Otherwise,
+ the total cumulative counter values in the histogram will be smaller
+ than the sum_all stored in the summary, which will complicate
+ computing the working set information from the histogram later on. */
+ if (src_num)
+ src_i--;
+ while (src_i >= 0)
+ {
+ src_cum += src_histo[src_i].cum_value;
+ src_i--;
+ }
+ /* At this point, tmp_i should be the smallest non-zero entry in the
+ tmp_histo. */
+ gcc_assert(tmp_i >= 0 && tmp_i < GCOV_HISTOGRAM_SIZE
+ && tmp_histo[tmp_i].num_counters > 0);
+ tmp_histo[tmp_i].cum_value += src_cum;
+
+ /* Finally, copy the merged histogram into tgt_histo. */
+ memcpy(tgt_histo, tmp_histo, sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
+}
+#endif /* IN_LIBGCOV || !IN_GCOV */
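To make the bucketing above concrete: a counter value is placed by taking the position r of its most-significant set bit plus the two bits just below it, so each power-of-two range is split into four linear sub-buckets. A small standalone sketch of that arithmetic (plain C, independent of the gcov sources; the sample values and main are only illustrative):

#include <stdio.h>
#include <stdint.h>

/* Mirror of the index arithmetic in gcov_histo_index above, assuming
   64-bit counters.  */
static unsigned
histo_index (uint64_t v)
{
  unsigned r = 0;
  uint64_t t = v;

  /* floor_log2: position of the most-significant set bit.  */
  while (t >>= 1)
    r++;

  /* Values 0..3 index the lowest four buckets directly.  */
  if (r < 2)
    return (unsigned) v;

  /* The next two bits below the MSB pick one of four linear sub-buckets.  */
  return (r - 1) * 4 + (unsigned) ((v >> (r - 2)) & 0x3);
}

int
main (void)
{
  uint64_t samples[] = { 0, 3, 4, 7, 8, 100, 1000000 };
  unsigned i;

  for (i = 0; i < sizeof samples / sizeof samples[0]; i++)
    printf ("%llu -> bucket %u\n",
            (unsigned long long) samples[i], histo_index (samples[i]));
  return 0;
}

For example, 100 (binary 1100100) has r = 6 and next-two-bits 2, so it lands in bucket (6 - 1) * 4 + 2 = 22.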
diff --git a/gcc/gcov-io.h b/gcc/gcov-io.h
index 972dc931518..e1532d79bcd 100644
--- a/gcc/gcov-io.h
+++ b/gcc/gcov-io.h
@@ -139,7 +139,9 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
counts: header int64:count*
summary: int32:checksum {count-summary}GCOV_COUNTERS_SUMMABLE
count-summary: int32:num int32:runs int64:sum
- int64:max int64:sum_max
+ int64:max int64:sum_max histogram
+ histogram: {int32:bitvector}8 histogram-buckets*
+ histogram-buckets: int32:num int64:min int64:sum
The ANNOUNCE_FUNCTION record is the same as that in the note file,
but without the source location. The COUNTS gives the
@@ -171,8 +173,10 @@ typedef unsigned gcov_unsigned_t __attribute__ ((mode (SI)));
typedef unsigned gcov_position_t __attribute__ ((mode (SI)));
#if LONG_LONG_TYPE_SIZE > 32
typedef signed gcov_type __attribute__ ((mode (DI)));
+typedef unsigned gcov_type_unsigned __attribute__ ((mode (DI)));
#else
typedef signed gcov_type __attribute__ ((mode (SI)));
+typedef unsigned gcov_type_unsigned __attribute__ ((mode (SI)));
#endif
#else
#if BITS_PER_UNIT == 16
@@ -180,16 +184,20 @@ typedef unsigned gcov_unsigned_t __attribute__ ((mode (HI)));
typedef unsigned gcov_position_t __attribute__ ((mode (HI)));
#if LONG_LONG_TYPE_SIZE > 32
typedef signed gcov_type __attribute__ ((mode (SI)));
+typedef unsigned gcov_type_unsigned __attribute__ ((mode (SI)));
#else
typedef signed gcov_type __attribute__ ((mode (HI)));
+typedef unsigned gcov_type_unsigned __attribute__ ((mode (HI)));
#endif
#else
typedef unsigned gcov_unsigned_t __attribute__ ((mode (QI)));
typedef unsigned gcov_position_t __attribute__ ((mode (QI)));
#if LONG_LONG_TYPE_SIZE > 32
typedef signed gcov_type __attribute__ ((mode (HI)));
+typedef unsigned gcov_type_unsigned __attribute__ ((mode (HI)));
#else
typedef signed gcov_type __attribute__ ((mode (QI)));
+typedef unsigned gcov_type_unsigned __attribute__ ((mode (QI)));
#endif
#endif
#endif
@@ -210,6 +218,7 @@ typedef unsigned gcov_position_t;
#if IN_GCOV
#define GCOV_LINKAGE static
typedef HOST_WIDEST_INT gcov_type;
+typedef unsigned HOST_WIDEST_INT gcov_type_unsigned;
#if IN_GCOV > 0
#include <sys/types.h>
#endif
@@ -309,8 +318,9 @@ typedef HOST_WIDEST_INT gcov_type;
#define GCOV_TAG_COUNTER_NUM(LENGTH) ((LENGTH) / 2)
#define GCOV_TAG_OBJECT_SUMMARY ((gcov_unsigned_t)0xa1000000) /* Obsolete */
#define GCOV_TAG_PROGRAM_SUMMARY ((gcov_unsigned_t)0xa3000000)
-#define GCOV_TAG_SUMMARY_LENGTH \
- (1 + GCOV_COUNTERS_SUMMABLE * (2 + 3 * 2))
+#define GCOV_TAG_SUMMARY_LENGTH(NUM) \
+ (1 + GCOV_COUNTERS_SUMMABLE * (10 + 3 * 2) + (NUM) * 5)
+
/* Counters that are collected. */
#define GCOV_COUNTER_ARCS 0 /* Arc transitions. */
@@ -389,6 +399,29 @@ typedef HOST_WIDEST_INT gcov_type;
/* Structured records. */
+/* Structure used for each bucket of the log2 histogram of counter values. */
+typedef struct
+{
+ /* Number of counters whose profile count falls within the bucket. */
+ gcov_unsigned_t num_counters;
+ /* Smallest profile count included in this bucket. */
+ gcov_type min_value;
+ /* Cumulative value of the profile counts in this bucket. */
+ gcov_type cum_value;
+} gcov_bucket_type;
+
+/* For a log2 scale histogram with each range split into 4
+ linear sub-ranges, there will be at most 64 (max gcov_type bit size) - 1
+ = 63 log2 ranges, since the lowest 2 log2 values share the lowest 4
+ linear sub-ranges (values 0 - 3). This gives 252 total entries (63*4). */
+
+#define GCOV_HISTOGRAM_SIZE 252
+
+/* How many unsigned ints are required to hold a bit vector of non-zero
+ histogram entries when the histogram is written to the gcov file.
+ This is essentially a ceiling divide by 32 bits. */
+#define GCOV_HISTOGRAM_BITVECTOR_SIZE ((GCOV_HISTOGRAM_SIZE + 31) / 32)
+
/* Cumulative counter data. */
struct gcov_ctr_summary
{
@@ -397,6 +430,8 @@ struct gcov_ctr_summary
gcov_type sum_all; /* sum of all counters accumulated. */
gcov_type run_max; /* maximum value on a single run. */
gcov_type sum_max; /* sum of individual run max values. */
+ gcov_bucket_type histogram[GCOV_HISTOGRAM_SIZE]; /* histogram of
+ counter values. */
};
/* Object & program summary record. */
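The sizes above are consistent: 63 usable log2 ranges times 4 sub-buckets gives the 252 histogram entries, and packing one presence bit per entry into 32-bit words needs 8 words, which is the {int32:bitvector}8 field in the file-format comment earlier in this header. A minimal standalone check of that arithmetic (constants repeated here only for illustration):

#include <stdio.h>

#define HISTOGRAM_SIZE 252                        /* 63 log2 ranges * 4.  */
#define HISTOGRAM_BITVECTOR_SIZE ((HISTOGRAM_SIZE + 31) / 32)

int
main (void)
{
  /* 8 words of 32 bits cover the 252 possible non-zero buckets.  */
  printf ("buckets=%d, bitvector words=%d\n",
          HISTOGRAM_SIZE, HISTOGRAM_BITVECTOR_SIZE);
  return 0;
}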
diff --git a/gcc/gimple-pretty-print.c b/gcc/gimple-pretty-print.c
index 658e0beea74..e4550c01104 100644
--- a/gcc/gimple-pretty-print.c
+++ b/gcc/gimple-pretty-print.c
@@ -770,9 +770,7 @@ dump_gimple_switch (pretty_printer *buffer, gimple gs, int spc, int flags)
for (i = 0; i < gimple_switch_num_labels (gs); i++)
{
tree case_label = gimple_switch_label (gs, i);
- if (case_label == NULL_TREE)
- continue;
-
+ gcc_checking_assert (case_label != NULL_TREE);
dump_generic_node (buffer, case_label, spc, flags, false);
pp_character (buffer, ' ');
dump_generic_node (buffer, CASE_LABEL (case_label), spc, flags, false);
diff --git a/gcc/gimple.c b/gcc/gimple.c
index d78c60f22e8..88fa7627e84 100644
--- a/gcc/gimple.c
+++ b/gcc/gimple.c
@@ -803,39 +803,14 @@ gimple
gimple_build_switch_nlabels (unsigned nlabels, tree index, tree default_label)
{
/* nlabels + 1 default label + 1 index. */
+ gcc_checking_assert (default_label);
gimple p = gimple_build_with_ops (GIMPLE_SWITCH, ERROR_MARK,
- 1 + (default_label != NULL) + nlabels);
+ 1 + 1 + nlabels);
gimple_switch_set_index (p, index);
- if (default_label)
- gimple_switch_set_default_label (p, default_label);
+ gimple_switch_set_default_label (p, default_label);
return p;
}
-
-/* Build a GIMPLE_SWITCH statement.
-
- INDEX is the switch's index.
- NLABELS is the number of labels in the switch excluding the DEFAULT_LABEL.
- ... are the labels excluding the default. */
-
-gimple
-gimple_build_switch (unsigned nlabels, tree index, tree default_label, ...)
-{
- va_list al;
- unsigned i, offset;
- gimple p = gimple_build_switch_nlabels (nlabels, index, default_label);
-
- /* Store the rest of the labels. */
- va_start (al, default_label);
- offset = (default_label != NULL);
- for (i = 0; i < nlabels; i++)
- gimple_switch_set_label (p, i + offset, va_arg (al, tree));
- va_end (al);
-
- return p;
-}
-
-
/* Build a GIMPLE_SWITCH statement.
INDEX is the switch's index.
@@ -843,15 +818,15 @@ gimple_build_switch (unsigned nlabels, tree index, tree default_label, ...)
ARGS is a vector of labels excluding the default. */
gimple
-gimple_build_switch_vec (tree index, tree default_label, VEC(tree, heap) *args)
+gimple_build_switch (tree index, tree default_label, VEC(tree, heap) *args)
{
- unsigned i, offset, nlabels = VEC_length (tree, args);
+ unsigned i, nlabels = VEC_length (tree, args);
+
gimple p = gimple_build_switch_nlabels (nlabels, index, default_label);
/* Copy the labels from the vector to the switch statement. */
- offset = (default_label != NULL);
for (i = 0; i < nlabels; i++)
- gimple_switch_set_label (p, i + offset, VEC_index (tree, args, i));
+ gimple_switch_set_label (p, i + 1, VEC_index (tree, args, i));
return p;
}
diff --git a/gcc/gimple.h b/gcc/gimple.h
index 827103d0eb3..15b597fc187 100644
--- a/gcc/gimple.h
+++ b/gcc/gimple.h
@@ -781,8 +781,7 @@ gimple gimple_build_wce (gimple_seq);
gimple gimple_build_resx (int);
gimple gimple_build_eh_dispatch (int);
gimple gimple_build_switch_nlabels (unsigned, tree, tree);
-gimple gimple_build_switch (unsigned, tree, tree, ...);
-gimple gimple_build_switch_vec (tree, tree, VEC(tree,heap) *);
+gimple gimple_build_switch (tree, tree, VEC(tree,heap) *);
gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree);
gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree);
gimple gimple_build_omp_for (gimple_seq, tree, size_t, gimple_seq);
@@ -3639,7 +3638,9 @@ gimple_switch_set_label (gimple gs, unsigned index, tree label)
static inline tree
gimple_switch_default_label (const_gimple gs)
{
- return gimple_switch_label (gs, 0);
+ tree label = gimple_switch_label (gs, 0);
+ gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label));
+ return label;
}
/* Set the default label for a switch statement. */
@@ -3647,6 +3648,7 @@ gimple_switch_default_label (const_gimple gs)
static inline void
gimple_switch_set_default_label (gimple gs, tree label)
{
+ gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label));
gimple_switch_set_label (gs, 0, label);
}
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index 27930277c9c..03973537ee6 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -1675,7 +1675,7 @@ preprocess_case_label_vec_for_gimple (VEC(tree,heap) *labels,
gcc_assert (!default_case);
default_case = elt;
/* The default case must be passed separately to the
- gimple_build_switch routines. But if DEFAULT_CASEP
+ gimple_build_switch routine. But if DEFAULT_CASEP
is NULL, we do not remove the default case (it would
be completely lost). */
if (default_casep)
@@ -1788,8 +1788,8 @@ gimplify_switch_expr (tree *expr_p, gimple_seq *pre_p)
gimplify_seq_add_stmt (&switch_body_seq, new_default);
}
- gimple_switch = gimple_build_switch_vec (SWITCH_COND (switch_expr),
- default_case, labels);
+ gimple_switch = gimple_build_switch (SWITCH_COND (switch_expr),
+ default_case, labels);
gimplify_seq_add_stmt (pre_p, gimple_switch);
gimplify_seq_add_seq (pre_p, switch_body_seq);
VEC_free(tree, heap, labels);
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index adbd0345f9e..9474167ce6d 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -4873,7 +4873,7 @@ expand_omp_sections (struct omp_region *region)
u = build_case_label (NULL, NULL, t);
make_edge (l0_bb, default_bb, 0);
- stmt = gimple_build_switch_vec (vmain, u, label_vec);
+ stmt = gimple_build_switch (vmain, u, label_vec);
gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
gsi_remove (&switch_si, true);
VEC_free (tree, heap, label_vec);
diff --git a/gcc/profile.c b/gcc/profile.c
index 3d0689afac3..a5029a1037b 100644
--- a/gcc/profile.c
+++ b/gcc/profile.c
@@ -84,6 +84,15 @@ struct bb_info {
const struct gcov_ctr_summary *profile_info;
+/* Number of data points in the working set summary array. Using 128
+ provides information for at least every 1% increment of the total
+ profile size. The last entry is hardwired to 99.9% of the total. */
+#define NUM_GCOV_WORKING_SETS 128
+
+/* Counter working set information computed from the current counter
+ summary. Not initialized unless profile_info summary is non-NULL. */
+static gcov_working_set_t gcov_working_sets[NUM_GCOV_WORKING_SETS];
+
/* Collect statistics on the performance of this pass for the entire source
file. */
@@ -192,6 +201,152 @@ instrument_values (histogram_values values)
}
+/* Compute the working set information from the counter histogram in
+ the profile summary. This is an array of information corresponding to a
+ range of percentages of the total execution count (sum_all), and includes
+ the number of counters required to cover that working set percentage and
+ the minimum counter value in that working set. */
+
+static void
+compute_working_sets (void)
+{
+ gcov_type working_set_cum_values[NUM_GCOV_WORKING_SETS];
+ gcov_type ws_cum_hotness_incr;
+ gcov_type cum, tmp_cum;
+ const gcov_bucket_type *histo_bucket;
+ unsigned ws_ix, c_num, count, pctinc, pct;
+ int h_ix;
+ gcov_working_set_t *ws_info;
+
+ if (!profile_info)
+ return;
+
+ /* Compute the amount of sum_all that the cumulative hotness grows
+ by in each successive working set entry, which depends on the
+ number of working set entries. */
+ ws_cum_hotness_incr = profile_info->sum_all / NUM_GCOV_WORKING_SETS;
+
+ /* Next fill in an array of the cumulative hotness values corresponding
+ to each working set summary entry we are going to compute below.
+ Skip 0% statistics, which can be extrapolated from the
+ rest of the summary data. */
+ cum = ws_cum_hotness_incr;
+ for (ws_ix = 0; ws_ix < NUM_GCOV_WORKING_SETS;
+ ws_ix++, cum += ws_cum_hotness_incr)
+ working_set_cum_values[ws_ix] = cum;
+ /* The last summary entry is reserved for (roughly) 99.9% of the
+ working set. Divide by 1024 so it becomes a shift, which gives
+ almost exactly 99.9%. */
+ working_set_cum_values[NUM_GCOV_WORKING_SETS-1]
+ = profile_info->sum_all - profile_info->sum_all/1024;
+
+ /* Next, walk through the histogram in descending order of hotness
+ and compute the statistics for the working set summary array.
+ As histogram entries are accumulated, we check to see which
+ working set entries have had their expected cum_value reached
+ and fill them in, walking the working set entries in increasing
+ size of cum_value. */
+ ws_ix = 0; /* The current entry into the working set array. */
+ cum = 0; /* The current accumulated counter sum. */
+ count = 0; /* The current accumulated count of block counters. */
+ for (h_ix = GCOV_HISTOGRAM_SIZE - 1;
+ h_ix >= 0 && ws_ix < NUM_GCOV_WORKING_SETS; h_ix--)
+ {
+ histo_bucket = &profile_info->histogram[h_ix];
+
+ /* If we haven't reached the required cumulative counter value for
+ the current working set percentage, simply accumulate this histogram
+ entry into the running sums and continue to the next histogram
+ entry. */
+ if (cum + histo_bucket->cum_value < working_set_cum_values[ws_ix])
+ {
+ cum += histo_bucket->cum_value;
+ count += histo_bucket->num_counters;
+ continue;
+ }
+
+ /* If adding the current histogram entry's cumulative counter value
+ causes us to exceed the current working set size, then estimate
+ how many of this histogram entry's counter values are required to
+ reach the working set size, and fill in working set entries
+ as we reach their expected cumulative value. */
+ for (c_num = 0, tmp_cum = cum;
+ c_num < histo_bucket->num_counters && ws_ix < NUM_GCOV_WORKING_SETS;
+ c_num++)
+ {
+ count++;
+ /* If we haven't reached the last histogram entry counter, add
+ in the minimum value again. This will underestimate the
+ cumulative sum so far, because many of the counter values in this
+ entry may have been larger than the minimum. We could add in the
+ average value every time, but that would require an expensive
+ divide operation. */
+ if (c_num + 1 < histo_bucket->num_counters)
+ tmp_cum += histo_bucket->min_value;
+ /* If we have reached the last histogram entry counter, then add
+ in the entire cumulative value. */
+ else
+ tmp_cum = cum + histo_bucket->cum_value;
+
+ /* Next walk through successive working set entries and fill in
+ the statistics for any whose size we have reached by accumulating
+ this histogram counter. */
+ while (ws_ix < NUM_GCOV_WORKING_SETS
+ && tmp_cum >= working_set_cum_values[ws_ix])
+ {
+ gcov_working_sets[ws_ix].num_counters = count;
+ gcov_working_sets[ws_ix].min_counter
+ = histo_bucket->min_value;
+ ws_ix++;
+ }
+ }
+ /* Finally, update the running cumulative value since we were
+ using a temporary above. */
+ cum += histo_bucket->cum_value;
+ }
+ gcc_assert (ws_ix == NUM_GCOV_WORKING_SETS);
+
+ if (dump_file)
+ {
+ fprintf (dump_file, "Counter working sets:\n");
+ /* Multiply the percentage by 100 to avoid float. */
+ pctinc = 100 * 100 / NUM_GCOV_WORKING_SETS;
+ for (ws_ix = 0, pct = pctinc; ws_ix < NUM_GCOV_WORKING_SETS;
+ ws_ix++, pct += pctinc)
+ {
+ if (ws_ix == NUM_GCOV_WORKING_SETS - 1)
+ pct = 9990;
+ ws_info = &gcov_working_sets[ws_ix];
+ /* Print out the percentage using integer arithmetic to avoid float. */
+ fprintf (dump_file, "\t\t%u.%02u%%: num counts=%u, min counter="
+ HOST_WIDEST_INT_PRINT_DEC "\n",
+ pct / 100, pct - (pct / 100 * 100),
+ ws_info->num_counters,
+ (HOST_WIDEST_INT)ws_info->min_counter);
+ }
+ }
+}
+
+/* Given the desired percentage of the full profile (sum_all from the
+ summary), multiplied by 10 to avoid float and passed as PCT_TIMES_10,
+ returns the corresponding working set information. If an exact match
+ for the percentage isn't found, the closest value is used. */
+
+gcov_working_set_t *
+find_working_set (unsigned pct_times_10)
+{
+ unsigned i;
+ if (!profile_info)
+ return NULL;
+ gcc_assert (pct_times_10 <= 1000);
+ if (pct_times_10 >= 999)
+ return &gcov_working_sets[NUM_GCOV_WORKING_SETS - 1];
+ i = pct_times_10 * NUM_GCOV_WORKING_SETS / 1000;
+ if (!i)
+ return &gcov_working_sets[0];
+ return &gcov_working_sets[i - 1];
+}
+
/* Computes hybrid profile for all matching entries in da_file.
CFG_CHECKSUM is the precomputed checksum for the CFG. */
@@ -219,6 +374,8 @@ get_exec_counts (unsigned cfg_checksum, unsigned lineno_checksum)
if (!counts)
return NULL;
+ compute_working_sets();
+
if (dump_file && profile_info)
fprintf(dump_file, "Merged %u profiles with maximal count %u.\n",
profile_info->runs, (unsigned) profile_info->sum_max);
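To illustrate the cutoff arithmetic in compute_working_sets and find_working_set above: with 128 entries, entry i holds the statistics for roughly the hottest (i + 1)/128 of sum_all, the last entry is pinned near 99.9%, and a requested percentage (times 10 to stay in integers) is scaled directly to an array index. A standalone sketch with a made-up sum_all value (names and numbers here are illustrative only):

#include <stdio.h>
#include <stdint.h>

#define NUM_WORKING_SETS 128

int
main (void)
{
  int64_t sum_all = 1000000;               /* Hypothetical total counter sum.  */
  int64_t cum_values[NUM_WORKING_SETS];
  int64_t incr = sum_all / NUM_WORKING_SETS;
  unsigned i, idx, pct_times_10;

  /* Cumulative hotness targets; the last is ~99.9% via a divide by 1024.  */
  for (i = 0; i < NUM_WORKING_SETS; i++)
    cum_values[i] = incr * (i + 1);
  cum_values[NUM_WORKING_SETS - 1] = sum_all - sum_all / 1024;

  /* Percentage-times-10 to array index, as in find_working_set.  */
  pct_times_10 = 500;                      /* Ask for the 50% working set.  */
  if (pct_times_10 >= 999)
    idx = NUM_WORKING_SETS - 1;
  else
    {
      idx = pct_times_10 * NUM_WORKING_SETS / 1000;
      if (idx)
        idx--;
    }

  printf ("50%% working set -> entry %u, cumulative target %lld\n",
          idx, (long long) cum_values[idx]);
  return 0;
}

With these numbers the 50% request maps to entry 63, whose cumulative target is 499968 counts (just under half of sum_all because of the integer division).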
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index 0498cede694..4a277f4a279 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -3473,7 +3473,7 @@ static bool
gate_handle_sched (void)
{
#ifdef INSN_SCHEDULING
- return flag_schedule_insns && dbg_cnt (sched_func);
+ return optimize > 0 && flag_schedule_insns && dbg_cnt (sched_func);
#else
return 0;
#endif
diff --git a/gcc/stmt.c b/gcc/stmt.c
index 11180e4dc4a..8d76b3eea08 100644
--- a/gcc/stmt.c
+++ b/gcc/stmt.c
@@ -1951,7 +1951,7 @@ expand_case (gimple stmt)
tree minval = NULL_TREE, maxval = NULL_TREE, range = NULL_TREE;
rtx default_label = NULL_RTX;
unsigned int count, uniq;
- int i, stopi = 0;
+ int i;
rtx before_case, end;
int ncases = gimple_switch_num_labels (stmt);
tree index_expr = gimple_switch_index (stmt);
@@ -1986,16 +1986,11 @@ expand_case (gimple stmt)
do_pending_stack_adjust ();
- /* The default case, if ever taken, is the first element. */
- elt = gimple_switch_label (stmt, 0);
- if (!CASE_LOW (elt) && !CASE_HIGH (elt))
- {
- default_label = label_rtx (CASE_LABEL (elt));
- stopi = 1;
- }
+ /* Find the default case target label. */
+ default_label = label_rtx (CASE_LABEL (gimple_switch_default_label (stmt)));
/* Get upper and lower bounds of case values. */
- elt = gimple_switch_label (stmt, stopi);
+ elt = gimple_switch_label (stmt, 1);
minval = fold_convert (index_type, CASE_LOW (elt));
elt = gimple_switch_label (stmt, ncases - 1);
if (CASE_HIGH (elt))
@@ -2011,7 +2006,7 @@ expand_case (gimple stmt)
uniq = 0;
count = 0;
label_bitmap = BITMAP_ALLOC (NULL);
- for (i = gimple_switch_num_labels (stmt) - 1; i >= stopi; --i)
+ for (i = gimple_switch_num_labels (stmt) - 1; i >= 1; --i)
{
tree low, high;
rtx lab;
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index dcce0824941..62dfe13afc3 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,37 @@
+2012-09-04 Jason Merrill <jason@redhat.com>
+
+ PR c++/54441
+ * g++.dg/ext/flexary3.C: New.
+
+ PR c++/54420
+ * g++.dg/cpp0x/lambda/lambda-intname.C: New.
+
+ PR c++/54198
+ * g++.dg/template/defarg15.C: New.
+
+ PR c++/54437
+ * g++.dg/template/access24.C: New.
+
+2012-09-04 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/54458
+ * gcc.dg/torture/pr54458.c: New testcase.
+
+2012-09-04 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/arm/neon-vext.c: New test.
+ * gcc.target/arm/neon-vext-execute.c: Ditto.
+
+2012-09-04 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/54243
+ PR fortran/54244
+ * gfortran.dg/select_type_29.f03: New.
+
+2012-09-03 Marc Glisse <marc.glisse@inria.fr>
+
+ * gcc.dg/fold-perm.c: Improve test.
+
2012-09-03 Tobias Burnus <burnus@net-b.de>
PR fortran/51632
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-intname.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-intname.C
new file mode 100644
index 00000000000..4c268c67820
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-intname.C
@@ -0,0 +1,6 @@
+// PR c++/54420
+
+class __lambda
+{
+ virtual bool is_sub ();
+};
diff --git a/gcc/testsuite/g++.dg/ext/flexary3.C b/gcc/testsuite/g++.dg/ext/flexary3.C
new file mode 100644
index 00000000000..906877b11b7
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ext/flexary3.C
@@ -0,0 +1,10 @@
+// PR c++/54441
+// { dg-options "" }
+
+struct s { char c[]; };
+
+int main()
+{
+ struct s s = { .c = 0 }; // { dg-error "initializer" }
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/template/access24.C b/gcc/testsuite/g++.dg/template/access24.C
new file mode 100644
index 00000000000..9f192266ec7
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/access24.C
@@ -0,0 +1,8 @@
+// PR c++/54437
+
+template <void (*P)()> void f();
+class A {
+ template <class T> static void g();
+ template <class T> static void h () { f<g<T> >(); }
+ static void i() { h<int>(); }
+};
diff --git a/gcc/testsuite/g++.dg/template/defarg15.C b/gcc/testsuite/g++.dg/template/defarg15.C
new file mode 100644
index 00000000000..fea3deee71c
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/defarg15.C
@@ -0,0 +1,19 @@
+// PR c++/54198
+
+template <typename T> void
+refIfNotNull (T* p1)
+{
+ p1->ref;
+}
+template <typename T> struct A
+{
+ A (T* p1)
+ {
+ refIfNotNull (p1);
+ }
+};
+class B;
+class C
+{
+ void getParent (A <B> = 0);
+};
diff --git a/gcc/testsuite/gcc.dg/fold-perm.c b/gcc/testsuite/gcc.dg/fold-perm.c
index 7396c1dfadc..2270c7b0f54 100644
--- a/gcc/testsuite/gcc.dg/fold-perm.c
+++ b/gcc/testsuite/gcc.dg/fold-perm.c
@@ -3,11 +3,12 @@
typedef int veci __attribute__ ((vector_size (4 * sizeof (int))));
-void fun (veci *f, veci *g, veci *h)
+void fun (veci *f, veci *g, veci *h, veci *i)
{
veci m = { 7, 7, 4, 6 };
veci n = { 0, 1, 2, 3 };
veci p = { 1, 1, 7, 6 };
+ *i = __builtin_shuffle (*i, p, m);
*h = __builtin_shuffle (*h, *h, p);
*g = __builtin_shuffle (*f, *g, m);
*f = __builtin_shuffle (*f, *g, n);
diff --git a/gcc/testsuite/gcc.dg/torture/pr54458.c b/gcc/testsuite/gcc.dg/torture/pr54458.c
new file mode 100644
index 00000000000..3d2e12fc2fd
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr54458.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+
+unsigned int a, b, c;
+
+void
+foo (unsigned int x)
+{
+ do
+ {
+ if (a == 0 ? 1 : 1 % a)
+ for (; b; b--)
+ lab:;
+ else
+ while (x)
+ ;
+ if (c)
+ goto lab;
+ }
+ while (1);
+}
diff --git a/gcc/testsuite/gcc.target/arm/neon-vext-execute.c b/gcc/testsuite/gcc.target/arm/neon-vext-execute.c
new file mode 100644
index 00000000000..3d6c28cca89
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/neon-vext-execute.c
@@ -0,0 +1,340 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-require-effective-target arm_little_endian } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_neon } */
+
+#include <arm_neon.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+uint8x8_t
+tst_vext_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ uint8x8_t __mask1 = {2, 3, 4, 5, 6, 7, 8, 9};
+
+ return __builtin_shuffle ( __a, __b, __mask1) ;
+}
+
+uint8x8_t
+tst_vext_u8_rotate (uint8x8_t __a)
+{
+ uint8x8_t __mask1 = {2, 3, 4, 5, 6, 7, 0, 1};
+ return __builtin_shuffle ( __a, __mask1) ;
+}
+
+uint16x4_t
+tst_vext_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ uint16x4_t __mask1 = {2, 3, 4, 5};
+ return __builtin_shuffle ( __a, __b, __mask1) ;
+}
+
+uint16x4_t
+tst_vext_u16_rotate (uint16x4_t __a)
+{
+ uint16x4_t __mask1 = {2, 3, 0, 1};
+ return __builtin_shuffle ( __a, __mask1) ;
+}
+
+uint32x2_t
+tst_vext_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ uint32x2_t __mask1 = {1, 2};
+ return __builtin_shuffle ( __a, __b, __mask1) ;
+}
+
+/* This one is mapped into vrev64.32. */
+uint32x2_t
+tst_vext_u32_rotate (uint32x2_t __a)
+{
+ uint32x2_t __mask1 = {1, 0};
+ return __builtin_shuffle ( __a, __mask1) ;
+}
+
+uint8x16_t
+tst_vextq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ uint8x16_t __mask1 = {4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19};
+ return __builtin_shuffle ( __a, __b, __mask1) ;
+}
+
+uint8x16_t
+tst_vextq_u8_rotate (uint8x16_t __a)
+{
+ uint8x16_t __mask1 = {4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 0, 1, 2, 3};
+ return __builtin_shuffle ( __a, __mask1) ;
+}
+
+uint16x8_t
+tst_vextq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ uint16x8_t __mask1 = {2, 3, 4, 5, 6, 7, 8, 9};
+ return __builtin_shuffle ( __a, __b, __mask1) ;
+}
+
+uint16x8_t
+tst_vextq_u16_rotate (uint16x8_t __a)
+{
+ uint16x8_t __mask1 = {2, 3, 4, 5, 6, 7, 0, 1};
+ return __builtin_shuffle ( __a, __mask1) ;
+}
+
+uint32x4_t
+tst_vextq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ uint32x4_t __mask1 = {1, 2, 3, 4};
+ return __builtin_shuffle ( __a, __b, __mask1) ;
+}
+
+uint32x4_t
+tst_vextq_u32_rotate (uint32x4_t __a)
+{
+ uint32x4_t __mask1 = {1, 2, 3, 0};
+ return __builtin_shuffle ( __a, __mask1) ;
+}
+
+uint64x2_t
+tst_vextq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ uint64x2_t __mask1 = {1, 2};
+ return __builtin_shuffle ( __a, __b, __mask1) ;
+}
+
+uint64x2_t
+tst_vextq_u64_rotate (uint64x2_t __a)
+{
+ uint64x2_t __mask1 = {1, 0};
+ return __builtin_shuffle ( __a, __mask1) ;
+}
+
+int main (void)
+{
+ uint8_t arr_u8x8[] = {0, 1, 2, 3, 4, 5, 6, 7};
+ uint8_t arr2_u8x8[] = {8, 9, 10, 11, 12, 13, 14, 15};
+ uint16_t arr_u16x4[] = {0, 1, 2, 3};
+ uint16_t arr2_u16x4[] = {4, 5, 6, 7};
+ uint32_t arr_u32x2[] = {0, 1};
+ uint32_t arr2_u32x2[] = {2, 3};
+ uint8_t arr_u8x16[] = {0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15};
+ uint8_t arr2_u8x16[] = {16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31};
+ uint16_t arr_u16x8[] = {0, 1, 2, 3, 4, 5, 6, 7};
+ uint16_t arr2_u16x8[] = {8, 9, 10, 11, 12, 13, 14, 15};
+ uint32_t arr_u32x4[] = {0, 1, 2, 3};
+ uint32_t arr2_u32x4[] = {4, 5, 6, 7};
+ uint64_t arr_u64x2[] = {0, 1};
+ uint64_t arr2_u64x2[] = {2, 3};
+
+ uint8_t expected_u8x8[] = {2, 3, 4, 5, 6, 7, 8, 9};
+ uint8_t expected_rot_u8x8[] = {2, 3, 4, 5, 6, 7, 0, 1};
+ uint16_t expected_u16x4[] = {2, 3, 4, 5};
+ uint16_t expected_rot_u16x4[] = {2, 3, 0, 1};
+ uint32_t expected_u32x2[] = {1, 2};
+ uint32_t expected_rot_u32x2[] = {1, 0};
+ uint8_t expected_u8x16[] = {4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19};
+ uint8_t expected_rot_u8x16[] = {4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 0, 1, 2, 3,};
+ uint16_t expected_u16x8[] = {2, 3, 4, 5, 6, 7, 8, 9};
+ uint16_t expected_rot_u16x8[] = {2, 3, 4, 5, 6, 7, 0, 1};
+ uint32_t expected_u32x4[] = {1, 2, 3, 4};
+ uint32_t expected_rot_u32x4[] = {1, 2, 3, 0};
+ uint64_t expected_u64x2[] = {1, 2};
+ uint64_t expected_rot_u64x2[] = {1, 0};
+
+ uint8x8_t vec_u8x8 = vld1_u8 (arr_u8x8);
+ uint8x8_t vec2_u8x8 = vld1_u8 (arr2_u8x8);
+ uint16x4_t vec_u16x4 = vld1_u16 (arr_u16x4);
+ uint16x4_t vec2_u16x4 = vld1_u16 (arr2_u16x4);
+ uint32x2_t vec_u32x2 = vld1_u32 (arr_u32x2);
+ uint32x2_t vec2_u32x2 = vld1_u32 (arr2_u32x2);
+ uint8x16_t vec_u8x16 = vld1q_u8 (arr_u8x16);
+ uint8x16_t vec2_u8x16 = vld1q_u8 (arr2_u8x16);
+ uint16x8_t vec_u16x8 = vld1q_u16 (arr_u16x8);
+ uint16x8_t vec2_u16x8 = vld1q_u16 (arr2_u16x8);
+ uint32x4_t vec_u32x4 = vld1q_u32 (arr_u32x4);
+ uint32x4_t vec2_u32x4 = vld1q_u32 (arr2_u32x4);
+ uint64x2_t vec_u64x2 = vld1q_u64 (arr_u64x2);
+ uint64x2_t vec2_u64x2 = vld1q_u64 (arr2_u64x2);
+
+ uint8x8_t result_u8x8;
+ uint16x4_t result_u16x4;
+ uint32x2_t result_u32x2;
+ uint8x16_t result_u8x16;
+ uint16x8_t result_u16x8;
+ uint32x4_t result_u32x4;
+ uint64x2_t result_u64x2;
+
+ union {uint8x8_t v; uint8_t buf[8];} mem_u8x8;
+ union {uint16x4_t v; uint16_t buf[4];} mem_u16x4;
+ union {uint32x2_t v; uint32_t buf[2];} mem_u32x2;
+ union {uint8x16_t v; uint8_t buf[16];} mem_u8x16;
+ union {uint16x8_t v; uint16_t buf[8];} mem_u16x8;
+ union {uint32x4_t v; uint32_t buf[4];} mem_u32x4;
+ union {uint64x2_t v; uint64_t buf[2];} mem_u64x2;
+
+ int i;
+
+ result_u8x8 = tst_vext_u8 (vec_u8x8, vec2_u8x8);
+ vst1_u8 (mem_u8x8.buf, result_u8x8);
+
+ for (i=0; i<8; i++)
+ if (mem_u8x8.buf[i] != expected_u8x8[i])
+ {
+ printf ("tst_vext_u8[%d]=%d expected %d\n",
+ i, mem_u8x8.buf[i], expected_u8x8[i]);
+ abort ();
+ }
+
+ result_u8x8 = tst_vext_u8_rotate (vec_u8x8);
+ vst1_u8 (mem_u8x8.buf, result_u8x8);
+
+ for (i=0; i<8; i++)
+ if (mem_u8x8.buf[i] != expected_rot_u8x8[i])
+ {
+ printf ("tst_vext_u8_rotate[%d]=%d expected %d\n",
+ i, mem_u8x8.buf[i], expected_rot_u8x8[i]);
+ abort ();
+ }
+
+
+ result_u16x4 = tst_vext_u16 (vec_u16x4, vec2_u16x4);
+ vst1_u16 (mem_u16x4.buf, result_u16x4);
+
+ for (i=0; i<4; i++)
+ if (mem_u16x4.buf[i] != expected_u16x4[i])
+ {
+ printf ("tst_vext_u16[%d]=%d expected %d\n",
+ i, mem_u16x4.buf[i], expected_u16x4[i]);
+ abort ();
+ }
+
+ result_u16x4 = tst_vext_u16_rotate (vec_u16x4);
+ vst1_u16 (mem_u16x4.buf, result_u16x4);
+
+ for (i=0; i<4; i++)
+ if (mem_u16x4.buf[i] != expected_rot_u16x4[i])
+ {
+ printf ("tst_vext_u16_rotate[%d]=%d expected %d\n",
+ i, mem_u16x4.buf[i], expected_rot_u16x4[i]);
+ abort ();
+ }
+
+
+ result_u32x2 = tst_vext_u32 (vec_u32x2, vec2_u32x2);
+ vst1_u32 (mem_u32x2.buf, result_u32x2);
+
+ for (i=0; i<2; i++)
+ if (mem_u32x2.buf[i] != expected_u32x2[i])
+ {
+ printf ("tst_vext_u32[%d]=%d expected %d\n",
+ i, mem_u32x2.buf[i], expected_u32x2[i]);
+ abort ();
+ }
+
+ result_u32x2 = tst_vext_u32_rotate (vec_u32x2);
+ vst1_u32 (mem_u32x2.buf, result_u32x2);
+
+ for (i=0; i<2; i++)
+ if (mem_u32x2.buf[i] != expected_rot_u32x2[i])
+ {
+ printf ("tst_vext_u32_rotate[%d]=%d expected %d\n",
+ i, mem_u32x2.buf[i], expected_rot_u32x2[i]);
+ abort ();
+ }
+
+
+ result_u8x16 = tst_vextq_u8 (vec_u8x16, vec2_u8x16);
+ vst1q_u8 (mem_u8x16.buf, result_u8x16);
+
+ for (i=0; i<16; i++)
+ if (mem_u8x16.buf[i] != expected_u8x16[i])
+ {
+ printf ("tst_vextq_u8[%d]=%d expected %d\n",
+ i, mem_u8x16.buf[i], expected_u8x16[i]);
+ abort ();
+ }
+
+ result_u8x16 = tst_vextq_u8_rotate (vec_u8x16);
+ vst1q_u8 (mem_u8x16.buf, result_u8x16);
+
+ for (i=0; i<16; i++)
+ if (mem_u8x16.buf[i] != expected_rot_u8x16[i])
+ {
+ printf ("tst_vextq_u8_rotate[%d]=%d expected %d\n",
+ i, mem_u8x16.buf[i], expected_rot_u8x16[i]);
+ abort ();
+ }
+
+ result_u16x8 = tst_vextq_u16 (vec_u16x8, vec2_u16x8);
+ vst1q_u16 (mem_u16x8.buf, result_u16x8);
+
+ for (i=0; i<8; i++)
+ if (mem_u16x8.buf[i] != expected_u16x8[i])
+ {
+ printf ("tst_vextq_u16[%d]=%d expected %d\n",
+ i, mem_u16x8.buf[i], expected_u16x8[i]);
+ abort ();
+ }
+
+ result_u16x8 = tst_vextq_u16_rotate (vec_u16x8);
+ vst1q_u16 (mem_u16x8.buf, result_u16x8);
+
+ for (i=0; i<8; i++)
+ if (mem_u16x8.buf[i] != expected_rot_u16x8[i])
+ {
+ printf ("tst_vextq_u16_rotate[%d]=%d expected %d\n",
+ i, mem_u16x8.buf[i], expected_rot_u16x8[i]);
+ abort ();
+ }
+
+ result_u32x4 = tst_vextq_u32 (vec_u32x4, vec2_u32x4);
+ vst1q_u32 (mem_u32x4.buf, result_u32x4);
+
+ for (i=0; i<4; i++)
+ if (mem_u32x4.buf[i] != expected_u32x4[i])
+ {
+ printf ("tst_vextq_u32[%d]=%d expected %d\n",
+ i, mem_u32x4.buf[i], expected_u32x4[i]);
+ abort ();
+ }
+
+ result_u32x4 = tst_vextq_u32_rotate (vec_u32x4);
+ vst1q_u32 (mem_u32x4.buf, result_u32x4);
+
+ for (i=0; i<4; i++)
+ if (mem_u32x4.buf[i] != expected_rot_u32x4[i])
+ {
+ printf ("tst_vextq_u32_rotate[%d]=%d expected %d\n",
+ i, mem_u32x4.buf[i], expected_rot_u32x4[i]);
+ abort ();
+ }
+
+ result_u64x2 = tst_vextq_u64 (vec_u64x2, vec2_u64x2);
+ vst1q_u64 (mem_u64x2.buf, result_u64x2);
+
+ for (i=0; i<2; i++)
+ if (mem_u64x2.buf[i] != expected_u64x2[i])
+ {
+ printf ("tst_vextq_u64[%d]=%lld expected %lld\n",
+ i, mem_u64x2.buf[i], expected_u64x2[i]);
+ abort ();
+ }
+
+ result_u64x2 = tst_vextq_u64_rotate (vec_u64x2);
+ vst1q_u64 (mem_u64x2.buf, result_u64x2);
+
+ for (i=0; i<2; i++)
+ if (mem_u64x2.buf[i] != expected_rot_u64x2[i])
+ {
+ printf ("tst_vextq_u64_rotate[%d]=%lld expected %lld\n",
+ i, mem_u64x2.buf[i], expected_rot_u64x2[i]);
+ abort ();
+ }
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/arm/neon-vext.c b/gcc/testsuite/gcc.target/arm/neon-vext.c
new file mode 100644
index 00000000000..4a012a996a8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/neon-vext.c
@@ -0,0 +1,115 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-require-effective-target arm_little_endian } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_neon } */
+
+#include <arm_neon.h>
+
+uint8x8_t
+tst_vext_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ uint8x8_t __mask1 = {2, 3, 4, 5, 6, 7, 8, 9};
+
+ return __builtin_shuffle ( __a, __b, __mask1) ;
+}
+
+uint8x8_t
+tst_vext_u8_rotate (uint8x8_t __a)
+{
+ uint8x8_t __mask1 = {2, 3, 4, 5, 6, 7, 0, 1};
+ return __builtin_shuffle ( __a, __mask1) ;
+}
+
+uint16x4_t
+tst_vext_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ uint16x4_t __mask1 = {2, 3, 4, 5};
+ return __builtin_shuffle ( __a, __b, __mask1) ;
+}
+
+uint16x4_t
+tst_vext_u16_rotate (uint16x4_t __a)
+{
+ uint16x4_t __mask1 = {2, 3, 0, 1};
+ return __builtin_shuffle ( __a, __mask1) ;
+}
+
+uint32x2_t
+tst_vext_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ uint32x2_t __mask1 = {1, 2};
+ return __builtin_shuffle ( __a, __b, __mask1) ;
+}
+
+/* This one is mapped into vrev64.32. */
+uint32x2_t
+tst_vext_u32_rotate (uint32x2_t __a)
+{
+ uint32x2_t __mask1 = {1, 0};
+ return __builtin_shuffle ( __a, __mask1) ;
+}
+
+uint8x16_t
+tst_vextq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ uint8x16_t __mask1 = {4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19};
+ return __builtin_shuffle ( __a, __b, __mask1) ;
+}
+
+uint8x16_t
+tst_vextq_u8_rotate (uint8x16_t __a)
+{
+ uint8x16_t __mask1 = {4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 0, 1, 2, 3};
+ return __builtin_shuffle ( __a, __mask1) ;
+}
+
+uint16x8_t
+tst_vextq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ uint16x8_t __mask1 = {2, 3, 4, 5, 6, 7, 8, 9};
+ return __builtin_shuffle ( __a, __b, __mask1) ;
+}
+
+uint16x8_t
+tst_vextq_u16_rotate (uint16x8_t __a)
+{
+ uint16x8_t __mask1 = {2, 3, 4, 5, 6, 7, 0, 1};
+ return __builtin_shuffle ( __a, __mask1) ;
+}
+
+uint32x4_t
+tst_vextq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ uint32x4_t __mask1 = {1, 2, 3, 4};
+ return __builtin_shuffle ( __a, __b, __mask1) ;
+}
+
+uint32x4_t
+tst_vextq_u32_rotate (uint32x4_t __a)
+{
+ uint32x4_t __mask1 = {1, 2, 3, 0};
+ return __builtin_shuffle ( __a, __mask1) ;
+}
+
+uint64x2_t
+tst_vextq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ uint64x2_t __mask1 = {1, 2};
+ return __builtin_shuffle ( __a, __b, __mask1) ;
+}
+
+uint64x2_t
+tst_vextq_u64_rotate (uint64x2_t __a)
+{
+ uint64x2_t __mask1 = {1, 0};
+ return __builtin_shuffle ( __a, __mask1) ;
+}
+
+/* { dg-final {scan-assembler-times "vext\.8\\t" 4} } */
+/* { dg-final {scan-assembler-times "vext\.16\\t" 4} } */
+/* { dg-final {scan-assembler-times "vext\.32\\t" 3} } */
+/* { dg-final {scan-assembler-times "vrev64\.32\\t" 1} } */
+/* { dg-final {scan-assembler-times "vext\.64\\t" 2} } */
diff --git a/gcc/testsuite/gfortran.dg/select_type_29.f03 b/gcc/testsuite/gfortran.dg/select_type_29.f03
new file mode 100644
index 00000000000..71603e3841a
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/select_type_29.f03
@@ -0,0 +1,26 @@
+! { dg-do compile }
+!
+! PR 54435: [4.7/4.8 Regression] ICE with SELECT TYPE on a non-CLASS object
+!
+! Contributed by xarthisius
+
+subroutine foo(x)
+ integer :: x
+ select type (x) ! { dg-error "Selector shall be polymorphic" }
+ end select
+end
+
+
+! PR 54443: [4.7/4.8 Regression] Segmentation Fault when Compiling for code using Fortran Polymorphic Entities
+!
+! Contributed by Mark Beyer <mbeyer@cirrusaircraft.com>
+
+program class_test
+ type hashnode
+ character(4) :: htype
+ end type
+ class(hashnode), pointer :: hp
+
+ select type(hp%htype) ! { dg-error "is not a named variable" }
+
+end program
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index 247f6166a9d..44715271822 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -1334,26 +1334,11 @@ group_case_labels_stmt (gimple stmt)
int old_size = gimple_switch_num_labels (stmt);
int i, j, new_size = old_size;
basic_block default_bb = NULL;
- bool has_default;
- /* The default label is always the first case in a switch
- statement after gimplification if it was not optimized
- away */
- if (!CASE_LOW (gimple_switch_default_label (stmt))
- && !CASE_HIGH (gimple_switch_default_label (stmt)))
- {
- tree default_case = gimple_switch_default_label (stmt);
- default_bb = label_to_block (CASE_LABEL (default_case));
- has_default = true;
- }
- else
- has_default = false;
+ default_bb = label_to_block (CASE_LABEL (gimple_switch_default_label (stmt)));
/* Look for possible opportunities to merge cases. */
- if (has_default)
- i = 1;
- else
- i = 0;
+ i = 1;
while (i < old_size)
{
tree base_case, base_high;
@@ -4148,7 +4133,7 @@ verify_gimple_switch (gimple stmt)
return true;
}
- elt = gimple_switch_default_label (stmt);
+ elt = gimple_switch_label (stmt, 0);
if (CASE_LOW (elt) != NULL_TREE || CASE_HIGH (elt) != NULL_TREE)
{
error ("invalid default case label in switch statement");
@@ -5530,9 +5515,10 @@ add_phi_args_after_copy (basic_block *region_copy, unsigned n_region,
important exit edge EXIT. By important we mean that no SSA name defined
inside region is live over the other exit edges of the region. All entry
edges to the region must go to ENTRY->dest. The edge ENTRY is redirected
- to the duplicate of the region. SSA form, dominance and loop information
- is updated. The new basic blocks are stored to REGION_COPY in the same
- order as they had in REGION, provided that REGION_COPY is not NULL.
+ to the duplicate of the region. Dominance and loop information is
+ updated, but not the SSA web. The new basic blocks are stored to
+ REGION_COPY in the same order as they had in REGION, provided that
+ REGION_COPY is not NULL.
The function returns false if it is unable to copy the region,
true otherwise. */
@@ -5593,8 +5579,6 @@ gimple_duplicate_sese_region (edge entry, edge exit,
free_region_copy = true;
}
- gcc_assert (!need_ssa_update_p (cfun));
-
/* Record blocks outside the region that are dominated by something
inside. */
doms = NULL;
@@ -5663,9 +5647,6 @@ gimple_duplicate_sese_region (edge entry, edge exit,
/* Add the other PHI node arguments. */
add_phi_args_after_copy (region_copy, n_region, NULL);
- /* Update the SSA web. */
- update_ssa (TODO_update_ssa);
-
if (free_region_copy)
free (region_copy);
diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c
index 9220931ffff..ec74d9d4c41 100644
--- a/gcc/tree-eh.c
+++ b/gcc/tree-eh.c
@@ -1487,8 +1487,8 @@ lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
/* Build the switch statement, setting last_case to be the default
label. */
- switch_stmt = gimple_build_switch_vec (finally_tmp, last_case,
- case_label_vec);
+ switch_stmt = gimple_build_switch (finally_tmp, last_case,
+ case_label_vec);
gimple_set_location (switch_stmt, finally_loc);
/* Need to link SWITCH_STMT after running replace_goto_queue
@@ -3376,7 +3376,7 @@ lower_eh_dispatch (basic_block src, gimple stmt)
default_label = build_case_label (NULL, NULL, default_label);
sort_case_labels (labels);
- x = gimple_build_switch_vec (filter, default_label, labels);
+ x = gimple_build_switch (filter, default_label, labels);
gsi_insert_before (&gsi, x, GSI_SAME_STMT);
VEC_free (tree, heap, labels);
diff --git a/gcc/tree-if-conv.c b/gcc/tree-if-conv.c
index 1086004d3fb..e9f65adddbd 100644
--- a/gcc/tree-if-conv.c
+++ b/gcc/tree-if-conv.c
@@ -307,6 +307,65 @@ fold_or_predicates (location_t loc, tree c1, tree c2)
return fold_build2_loc (loc, TRUTH_OR_EXPR, boolean_type_node, c1, c2);
}
+/* Returns true if N is either a constant or a SSA_NAME. */
+
+static bool
+constant_or_ssa_name (tree n)
+{
+ switch (TREE_CODE (n))
+ {
+ case SSA_NAME:
+ case INTEGER_CST:
+ case REAL_CST:
+ case COMPLEX_CST:
+ case VECTOR_CST:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* Returns either a COND_EXPR or the folded expression if the folded
+ expression is a MIN_EXPR, a MAX_EXPR, an ABS_EXPR,
+ a constant or a SSA_NAME. */
+
+static tree
+fold_build_cond_expr (tree type, tree cond, tree rhs, tree lhs)
+{
+ tree rhs1, lhs1, cond_expr;
+ cond_expr = fold_ternary (COND_EXPR, type, cond,
+ rhs, lhs);
+
+ if (cond_expr == NULL_TREE)
+ return build3 (COND_EXPR, type, cond, rhs, lhs);
+
+ STRIP_USELESS_TYPE_CONVERSION (cond_expr);
+
+ if (constant_or_ssa_name (cond_expr))
+ return cond_expr;
+
+ if (TREE_CODE (cond_expr) == ABS_EXPR)
+ {
+ rhs1 = TREE_OPERAND (cond_expr, 1);
+ STRIP_USELESS_TYPE_CONVERSION (rhs1);
+ if (constant_or_ssa_name (rhs1))
+ return build1 (ABS_EXPR, type, rhs1);
+ }
+
+ if (TREE_CODE (cond_expr) == MIN_EXPR
+ || TREE_CODE (cond_expr) == MAX_EXPR)
+ {
+ lhs1 = TREE_OPERAND (cond_expr, 0);
+ STRIP_USELESS_TYPE_CONVERSION (lhs1);
+ rhs1 = TREE_OPERAND (cond_expr, 1);
+ STRIP_USELESS_TYPE_CONVERSION (rhs1);
+ if (constant_or_ssa_name (rhs1)
+ && constant_or_ssa_name (lhs1))
+ return build2 (TREE_CODE (cond_expr), type, lhs1, rhs1);
+ }
+ return build3 (COND_EXPR, type, cond, rhs, lhs);
+}
+
/* Add condition NC to the predicate list of basic block BB. */
static inline void
@@ -1293,8 +1352,8 @@ predicate_scalar_phi (gimple phi, tree cond,
|| bb_postdominates_preds (bb));
/* Build new RHS using selected condition and arguments. */
- rhs = build3 (COND_EXPR, TREE_TYPE (res),
- unshare_expr (cond), arg_0, arg_1);
+ rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
+ arg_0, arg_1);
}
new_stmt = gimple_build_assign (res, rhs);
@@ -1554,7 +1613,7 @@ predicate_mem_writes (loop_p loop)
cond = force_gimple_operand_gsi_1 (&gsi, unshare_expr (cond),
is_gimple_condexpr, NULL_TREE,
true, GSI_SAME_STMT);
- rhs = build3 (COND_EXPR, type, unshare_expr (cond), rhs, lhs);
+ rhs = fold_build_cond_expr (type, unshare_expr (cond), rhs, lhs);
gimple_assign_set_rhs1 (stmt, ifc_temp_var (type, rhs, &gsi));
update_stmt (stmt);
}
diff --git a/gcc/tree-ssa-loop-ch.c b/gcc/tree-ssa-loop-ch.c
index 46097c2bea3..c43819ab3d8 100644
--- a/gcc/tree-ssa-loop-ch.c
+++ b/gcc/tree-ssa-loop-ch.c
@@ -241,6 +241,7 @@ copy_loop_headers (void)
split_edge (loop_latch_edge (loop));
}
+ update_ssa (TODO_update_ssa);
free (bbs);
free (copied_bbs);
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index 6d10df87a0e..bb1a03dfe99 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -368,9 +368,7 @@ typedef struct bitmap_set
EXECUTE_IF_SET_IN_BITMAP(&(set)->values, 0, (id), (bi))
/* Mapping from value id to expressions with that value_id. */
-DEF_VEC_P (bitmap_set_t);
-DEF_VEC_ALLOC_P (bitmap_set_t, heap);
-static VEC(bitmap_set_t, heap) *value_expressions;
+static VEC(bitmap, heap) *value_expressions;
/* Sets that we need to keep track of. */
typedef struct bb_bitmap_sets
@@ -580,24 +578,23 @@ phi_trans_add (pre_expr e, pre_expr v, basic_block pred)
static void
add_to_value (unsigned int v, pre_expr e)
{
- bitmap_set_t set;
+ bitmap set;
- gcc_assert (get_expr_value_id (e) == v);
+ gcc_checking_assert (get_expr_value_id (e) == v);
- if (v >= VEC_length (bitmap_set_t, value_expressions))
+ if (v >= VEC_length (bitmap, value_expressions))
{
- VEC_safe_grow_cleared (bitmap_set_t, heap, value_expressions,
- v + 1);
+ VEC_safe_grow_cleared (bitmap, heap, value_expressions, v + 1);
}
- set = VEC_index (bitmap_set_t, value_expressions, v);
+ set = VEC_index (bitmap, value_expressions, v);
if (!set)
{
- set = bitmap_set_new ();
- VEC_replace (bitmap_set_t, value_expressions, v, set);
+ set = BITMAP_ALLOC (&grand_bitmap_obstack);
+ VEC_replace (bitmap, value_expressions, v, set);
}
- bitmap_insert_into_set_1 (set, e, v, true);
+ bitmap_set_bit (set, get_or_alloc_expression_id (e));
}
/* Create a new bitmap set and return it. */
@@ -717,8 +714,8 @@ sorted_array_from_bitmap_set (bitmap_set_t set)
If this is somehow a significant lose for some cases, we can
choose which set to walk based on the set size. */
- bitmap_set_t exprset = VEC_index (bitmap_set_t, value_expressions, i);
- FOR_EACH_EXPR_ID_IN_SET (exprset, j, bj)
+ bitmap exprset = VEC_index (bitmap, value_expressions, i);
+ EXECUTE_IF_SET_IN_BITMAP (exprset, 0, j, bj)
{
if (bitmap_bit_p (&set->expressions, j))
VEC_safe_push (pre_expr, heap, result, expression_for_id (j));
@@ -824,7 +821,7 @@ static void
bitmap_set_replace_value (bitmap_set_t set, unsigned int lookfor,
const pre_expr expr)
{
- bitmap_set_t exprset;
+ bitmap exprset;
unsigned int i;
bitmap_iterator bi;
@@ -843,8 +840,8 @@ bitmap_set_replace_value (bitmap_set_t set, unsigned int lookfor,
5-10x faster than walking the bitmap. If this is somehow a
significant lose for some cases, we can choose which set to walk
based on the set size. */
- exprset = VEC_index (bitmap_set_t, value_expressions, lookfor);
- FOR_EACH_EXPR_ID_IN_SET (exprset, i, bi)
+ exprset = VEC_index (bitmap, value_expressions, lookfor);
+ EXECUTE_IF_SET_IN_BITMAP (exprset, 0, i, bi)
{
if (bitmap_clear_bit (&set->expressions, i))
{
@@ -1042,12 +1039,14 @@ debug_bitmap_sets_for (basic_block bb)
static void
print_value_expressions (FILE *outfile, unsigned int val)
{
- bitmap_set_t set = VEC_index (bitmap_set_t, value_expressions, val);
+ bitmap set = VEC_index (bitmap, value_expressions, val);
if (set)
{
+ bitmap_set x;
char s[10];
sprintf (s, "%04d", val);
- print_bitmap_set (outfile, set, s, 0);
+ x.expressions = *set;
+ print_bitmap_set (outfile, &x, s, 0);
}
}
@@ -1095,9 +1094,9 @@ get_constant_for_value_id (unsigned int v)
{
unsigned int i;
bitmap_iterator bi;
- bitmap_set_t exprset = VEC_index (bitmap_set_t, value_expressions, v);
+ bitmap exprset = VEC_index (bitmap, value_expressions, v);
- FOR_EACH_EXPR_ID_IN_SET (exprset, i, bi)
+ EXECUTE_IF_SET_IN_BITMAP (exprset, 0, i, bi)
{
pre_expr expr = expression_for_id (i);
if (expr->kind == CONSTANT)
@@ -1377,9 +1376,8 @@ get_representative_for (const pre_expr e)
and pick out an SSA_NAME. */
unsigned int i;
bitmap_iterator bi;
- bitmap_set_t exprs = VEC_index (bitmap_set_t, value_expressions,
- value_id);
- FOR_EACH_EXPR_ID_IN_SET (exprs, i, bi)
+ bitmap exprs = VEC_index (bitmap, value_expressions, value_id);
+ EXECUTE_IF_SET_IN_BITMAP (exprs, 0, i, bi)
{
pre_expr rep = expression_for_id (i);
if (rep->kind == NAME)
@@ -1499,7 +1497,7 @@ phi_translate_1 (pre_expr expr, bitmap_set_t set1, bitmap_set_t set2,
else
{
new_val_id = get_next_value_id ();
- VEC_safe_grow_cleared (bitmap_set_t, heap,
+ VEC_safe_grow_cleared (bitmap, heap,
value_expressions,
get_max_value_id() + 1);
nary = vn_nary_op_insert_pieces (newnary->length,
@@ -1698,7 +1696,7 @@ phi_translate_1 (pre_expr expr, bitmap_set_t set1, bitmap_set_t set2,
if (changed || !same_valid)
{
new_val_id = get_next_value_id ();
- VEC_safe_grow_cleared (bitmap_set_t, heap,
+ VEC_safe_grow_cleared (bitmap, heap,
value_expressions,
get_max_value_id() + 1);
}
@@ -1851,9 +1849,9 @@ bitmap_find_leader (bitmap_set_t set, unsigned int val, gimple stmt)
{
unsigned int i;
bitmap_iterator bi;
- bitmap_set_t exprset = VEC_index (bitmap_set_t, value_expressions, val);
+ bitmap exprset = VEC_index (bitmap, value_expressions, val);
- FOR_EACH_EXPR_ID_IN_SET (exprset, i, bi)
+ EXECUTE_IF_SET_IN_BITMAP (exprset, 0, i, bi)
{
pre_expr expr = expression_for_id (i);
if (expr->kind == CONSTANT)
@@ -1875,10 +1873,9 @@ bitmap_find_leader (bitmap_set_t set, unsigned int val, gimple stmt)
choose which set to walk based on which set is smaller. */
unsigned int i;
bitmap_iterator bi;
- bitmap_set_t exprset = VEC_index (bitmap_set_t, value_expressions, val);
+ bitmap exprset = VEC_index (bitmap, value_expressions, val);
- EXECUTE_IF_AND_IN_BITMAP (&exprset->expressions,
- &set->expressions, 0, i, bi)
+ EXECUTE_IF_AND_IN_BITMAP (exprset, &set->expressions, 0, i, bi)
{
pre_expr val = expression_for_id (i);
/* At the point where stmt is not null, there should always
@@ -2916,14 +2913,14 @@ find_or_generate_expression (basic_block block, pre_expr expr,
if (genop == NULL
&& !domstmt)
{
- bitmap_set_t exprset;
+ bitmap exprset;
unsigned int lookfor = get_expr_value_id (expr);
bool handled = false;
bitmap_iterator bi;
unsigned int i;
- exprset = VEC_index (bitmap_set_t, value_expressions, lookfor);
- FOR_EACH_EXPR_ID_IN_SET (exprset, i, bi)
+ exprset = VEC_index (bitmap, value_expressions, lookfor);
+ EXECUTE_IF_SET_IN_BITMAP (exprset, 0, i, bi)
{
pre_expr temp = expression_for_id (i);
if (temp->kind != NAME)
@@ -3542,11 +3539,10 @@ do_regular_insertion (basic_block block, basic_block dom)
{
unsigned int j;
bitmap_iterator bi;
- bitmap_set_t exprset = VEC_index (bitmap_set_t,
- value_expressions, val);
+ bitmap exprset = VEC_index (bitmap, value_expressions, val);
unsigned int new_val = get_expr_value_id (edoubleprime);
- FOR_EACH_EXPR_ID_IN_SET (exprset, j, bi)
+ EXECUTE_IF_SET_IN_BITMAP (exprset, 0, j, bi)
{
pre_expr expr = expression_for_id (j);
@@ -3786,14 +3782,15 @@ insert (void)
static void
add_to_exp_gen (basic_block block, tree op)
{
- if (!in_fre)
- {
- pre_expr result;
- if (TREE_CODE (op) == SSA_NAME && ssa_undefined_value_p (op))
- return;
- result = get_or_alloc_expr_for_name (op);
- bitmap_value_insert_into_set (EXP_GEN (block), result);
- }
+ pre_expr result;
+
+ gcc_checking_assert (!in_fre);
+
+ if (TREE_CODE (op) == SSA_NAME && ssa_undefined_value_p (op))
+ return;
+
+ result = get_or_alloc_expr_for_name (op);
+ bitmap_value_insert_into_set (EXP_GEN (block), result);
}
/* Create value ids for PHI in BLOCK. */
@@ -3805,23 +3802,23 @@ make_values_for_phi (gimple phi, basic_block block)
/* We have no need for virtual phis, as they don't represent
actual computations. */
- if (!virtual_operand_p (result))
+ if (virtual_operand_p (result))
+ return;
+
+ pre_expr e = get_or_alloc_expr_for_name (result);
+ add_to_value (get_expr_value_id (e), e);
+ bitmap_value_insert_into_set (AVAIL_OUT (block), e);
+ if (!in_fre)
{
- pre_expr e = get_or_alloc_expr_for_name (result);
- add_to_value (get_expr_value_id (e), e);
+ unsigned i;
bitmap_insert_into_set (PHI_GEN (block), e);
- bitmap_value_insert_into_set (AVAIL_OUT (block), e);
- if (!in_fre)
+ for (i = 0; i < gimple_phi_num_args (phi); ++i)
{
- unsigned i;
- for (i = 0; i < gimple_phi_num_args (phi); ++i)
+ tree arg = gimple_phi_arg_def (phi, i);
+ if (TREE_CODE (arg) == SSA_NAME)
{
- tree arg = gimple_phi_arg_def (phi, i);
- if (TREE_CODE (arg) == SSA_NAME)
- {
- e = get_or_alloc_expr_for_name (arg);
- add_to_value (get_expr_value_id (e), e);
- }
+ e = get_or_alloc_expr_for_name (arg);
+ add_to_value (get_expr_value_id (e), e);
}
}
}
@@ -3934,6 +3931,10 @@ compute_avail (void)
bitmap_value_insert_into_set (AVAIL_OUT (block), e);
}
+ /* That's all we need to do when doing FRE. */
+ if (in_fre)
+ continue;
+
if (gimple_has_side_effects (stmt) || stmt_could_throw_p (stmt))
continue;
@@ -3992,8 +3993,7 @@ compute_avail (void)
get_or_alloc_expression_id (result);
add_to_value (get_expr_value_id (result), result);
- if (!in_fre)
- bitmap_value_insert_into_set (EXP_GEN (block), result);
+ bitmap_value_insert_into_set (EXP_GEN (block), result);
}
continue;
}
@@ -4105,8 +4105,7 @@ compute_avail (void)
get_or_alloc_expression_id (result);
add_to_value (get_expr_value_id (result), result);
- if (!in_fre)
- bitmap_value_insert_into_set (EXP_GEN (block), result);
+ bitmap_value_insert_into_set (EXP_GEN (block), result);
continue;
}
@@ -4733,15 +4732,15 @@ my_rev_post_order_compute (int *post_order, bool include_entry_exit)
src = ei_edge (ei)->src;
dest = ei_edge (ei)->dest;
- /* Check if the edge destination has been visited yet. */
+ /* Check if the edge source has been visited yet. */
if (src != ENTRY_BLOCK_PTR && ! TEST_BIT (visited, src->index))
{
/* Mark that we have visited the destination. */
SET_BIT (visited, src->index);
if (EDGE_COUNT (src->preds) > 0)
- /* Since the DEST node has been visited for the first
- time, check its successors. */
+ /* Since the SRC node has been visited for the first
+ time, check its predecessors. */
stack[sp++] = ei_start (src->preds);
else
post_order[post_order_num++] = src->index;
@@ -4777,8 +4776,8 @@ init_pre (bool do_fre)
next_expression_id = 1;
expressions = NULL;
VEC_safe_push (pre_expr, heap, expressions, (pre_expr)NULL);
- value_expressions = VEC_alloc (bitmap_set_t, heap, get_max_value_id () + 1);
- VEC_safe_grow_cleared (bitmap_set_t, heap, value_expressions,
+ value_expressions = VEC_alloc (bitmap, heap, get_max_value_id () + 1);
+ VEC_safe_grow_cleared (bitmap, heap, value_expressions,
get_max_value_id() + 1);
name_to_id = NULL;
@@ -4807,9 +4806,12 @@ init_pre (bool do_fre)
sizeof (struct pre_expr_d), 30);
FOR_ALL_BB (bb)
{
- EXP_GEN (bb) = bitmap_set_new ();
- PHI_GEN (bb) = bitmap_set_new ();
- TMP_GEN (bb) = bitmap_set_new ();
+ if (!do_fre)
+ {
+ EXP_GEN (bb) = bitmap_set_new ();
+ PHI_GEN (bb) = bitmap_set_new ();
+ TMP_GEN (bb) = bitmap_set_new ();
+ }
AVAIL_OUT (bb) = bitmap_set_new ();
}
@@ -4827,7 +4829,7 @@ fini_pre (bool do_fre)
bool do_ab_cleanup = !bitmap_empty_p (need_ab_cleanup);
free (postorder);
- VEC_free (bitmap_set_t, heap, value_expressions);
+ VEC_free (bitmap, heap, value_expressions);
BITMAP_FREE (inserted_exprs);
bitmap_obstack_release (&grand_bitmap_obstack);
free_alloc_pool (bitmap_set_pool);
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index 5d5a91cef7c..fed7c55e5f0 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -1007,7 +1007,7 @@ vn_reference_fold_indirect (VEC (vn_reference_op_s, heap) **ops,
vn_reference_op_t op = &VEC_index (vn_reference_op_s, *ops, i);
vn_reference_op_t mem_op = &VEC_index (vn_reference_op_s, *ops, i - 1);
tree addr_base;
- HOST_WIDE_INT addr_offset;
+ HOST_WIDE_INT addr_offset = 0;
/* The only thing we have to do is from &OBJ.foo.bar add the offset
from .foo.bar to the preceding MEM_REF offset and replace the
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index 609ee2c56b3..6e71d5504dc 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -4527,6 +4527,18 @@ find_func_aliases (gimple origt)
&& !POINTER_TYPE_P (TREE_TYPE (rhsop))))
|| gimple_assign_single_p (t))
get_constraint_for_rhs (rhsop, &rhsc);
+ else if (code == COND_EXPR)
+ {
+ /* The result is a merge of both COND_EXPR arms. */
+ VEC (ce_s, heap) *tmp = NULL;
+ struct constraint_expr *rhsp;
+ unsigned i;
+ get_constraint_for_rhs (gimple_assign_rhs2 (t), &rhsc);
+ get_constraint_for_rhs (gimple_assign_rhs3 (t), &tmp);
+ FOR_EACH_VEC_ELT (ce_s, tmp, i, rhsp)
+ VEC_safe_push (ce_s, heap, rhsc, rhsp);
+ VEC_free (ce_s, heap, tmp);
+ }
else if (truth_value_p (code))
/* Truth value results are not pointer (parts). Or at least
very very unreasonable obfuscation of a part. */
diff --git a/gcc/tree-ssa-threadupdate.c b/gcc/tree-ssa-threadupdate.c
index 86ad74f1bc8..30336a18e81 100644
--- a/gcc/tree-ssa-threadupdate.c
+++ b/gcc/tree-ssa-threadupdate.c
@@ -1037,11 +1037,21 @@ thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
}
free (bblocks);
+ /* If the new header has multiple latches mark it so. */
+ FOR_EACH_EDGE (e, ei, loop->header->preds)
+ if (e->src->loop_father == loop
+ && e->src != loop->latch)
+ {
+ loop->latch = NULL;
+ loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
+ }
+
/* Cancel remaining threading requests that would make the
loop a multiple entry loop. */
FOR_EACH_EDGE (e, ei, header->preds)
{
edge e2;
+
if (e->aux == NULL)
continue;
diff --git a/gcc/tree-switch-conversion.c b/gcc/tree-switch-conversion.c
index df88ddf77e8..87baefc07cf 100644
--- a/gcc/tree-switch-conversion.c
+++ b/gcc/tree-switch-conversion.c
@@ -318,7 +318,7 @@ emit_case_bit_tests (gimple swtch, tree index_expr,
memset (&test, 0, sizeof (test));
/* Get the edge for the default case. */
- tmp = gimple_switch_label (swtch, 0);
+ tmp = gimple_switch_default_label (swtch);
default_bb = label_to_block (CASE_LABEL (tmp));
default_edge = find_edge (switch_bb, default_bb);
@@ -612,14 +612,12 @@ collect_switch_conv_info (gimple swtch, struct switch_conv_info *info)
memset (info, 0, sizeof (*info));
/* The gimplifier has already sorted the cases by CASE_LOW and ensured there
- is a default label which is the first in the vector. */
- gcc_assert (CASE_LOW (gimple_switch_label (swtch, 0)) == NULL_TREE);
-
- /* Collect the bits we can deduce from the CFG. */
+ is a default label which is the first in the vector.
+ Collect the bits we can deduce from the CFG. */
info->index_expr = gimple_switch_index (swtch);
info->switch_bb = gimple_bb (swtch);
info->default_bb =
- label_to_block (CASE_LABEL (gimple_switch_label (swtch, 0)));
+ label_to_block (CASE_LABEL (gimple_switch_default_label (swtch)));
e_default = find_edge (info->switch_bb, info->default_bb);
info->default_prob = e_default->probability;
info->default_count = e_default->count;
@@ -1393,7 +1391,7 @@ process_switch (gimple swtch)
transformation. */
create_temp_arrays (&info);
- gather_default_values (gimple_switch_label (swtch, 0), &info);
+ gather_default_values (gimple_switch_default_label (swtch), &info);
build_constructors (swtch, &info);
build_arrays (swtch, &info); /* Build the static arrays and assignments. */
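
A short sketch (not part of the patch) of the invariant the hunks above rely on: the gimplifier keeps the default case at index 0 of the case-label vector, so the named accessor returns the same label and merely documents the intent.

    tree dflt = gimple_switch_default_label (swtch);
    gcc_assert (dflt == gimple_switch_label (swtch, 0));
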
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index f949e8b9bee..067b60f168f 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -9142,7 +9142,7 @@ execute_vrp (void)
/* As we may have replaced the default label with a regular one
make sure to make it a real default label again. This ensures
optimal expansion. */
- label = gimple_switch_default_label (su->stmt);
+ label = gimple_switch_label (su->stmt, 0);
CASE_LOW (label) = NULL_TREE;
CASE_HIGH (label) = NULL_TREE;
}
diff --git a/gcc/valtrack.c b/gcc/valtrack.c
index b4eb5785ba3..2cdb06b174e 100644
--- a/gcc/valtrack.c
+++ b/gcc/valtrack.c
@@ -333,6 +333,14 @@ dead_debug_insert_temp (struct dead_debug *debug, unsigned int uregno,
{
if (DF_REF_REGNO (cur->use) == uregno)
{
+ /* If this loc has already been changed, e.g. to a debug_expr,
+ as part of a multi-register use, just drop it. */
+ if (!REG_P (*DF_REF_REAL_LOC (cur->use)))
+ {
+ *tailp = cur->next;
+ XDELETE (cur);
+ continue;
+ }
*usesp = cur;
usesp = &cur->next;
*tailp = cur->next;
diff --git a/gcc/vec.c b/gcc/vec.c
index 51a55d95fbf..be9f54a5b57 100644
--- a/gcc/vec.c
+++ b/gcc/vec.c
@@ -175,8 +175,8 @@ calculate_allocation (const struct vec_prefix *pfx, int reserve, bool exact)
if (pfx)
{
- alloc = pfx->alloc;
- num = pfx->num;
+ alloc = pfx->alloc_;
+ num = pfx->num_;
}
else if (!reserve)
/* If there's no prefix, and we've not requested anything, then we
@@ -240,9 +240,9 @@ vec_gc_o_reserve_1 (void *vec, int reserve, size_t vec_offset, size_t elt_size,
vec = ggc_realloc_stat (vec, size PASS_MEM_STAT);
- ((struct vec_prefix *)vec)->alloc = alloc;
+ ((struct vec_prefix *)vec)->alloc_ = alloc;
if (!pfx)
- ((struct vec_prefix *)vec)->num = 0;
+ ((struct vec_prefix *)vec)->num_ = 0;
return vec;
}
@@ -268,9 +268,9 @@ vec_heap_o_reserve_1 (void *vec, int reserve, size_t vec_offset,
free_overhead (pfx);
vec = xrealloc (vec, vec_offset + alloc * elt_size);
- ((struct vec_prefix *)vec)->alloc = alloc;
+ ((struct vec_prefix *)vec)->alloc_ = alloc;
if (!pfx)
- ((struct vec_prefix *)vec)->num = 0;
+ ((struct vec_prefix *)vec)->num_ = 0;
if (GATHER_STATISTICS && vec)
register_overhead ((struct vec_prefix *)vec,
vec_offset + alloc * elt_size FINAL_PASS_MEM_STAT);
@@ -306,8 +306,8 @@ vec_stack_p_reserve_exact_1 (int alloc, void *space)
VEC_safe_push (void_p, heap, stack_vecs, space);
- pfx->num = 0;
- pfx->alloc = alloc;
+ pfx->num_ = 0;
+ pfx->alloc_ = alloc;
return space;
}
@@ -343,15 +343,15 @@ vec_stack_o_reserve_1 (void *vec, int reserve, size_t vec_offset,
}
/* Move VEC to the heap. */
- reserve += ((struct vec_prefix *) vec)->num;
+ reserve += ((struct vec_prefix *) vec)->num_;
newvec = vec_heap_o_reserve_1 (NULL, reserve, vec_offset, elt_size,
exact PASS_MEM_STAT);
if (newvec && vec)
{
- ((struct vec_prefix *) newvec)->num = ((struct vec_prefix *) vec)->num;
+ ((struct vec_prefix *) newvec)->num_ = ((struct vec_prefix *) vec)->num_;
memcpy (((struct vec_prefix *) newvec)+1,
((struct vec_prefix *) vec)+1,
- ((struct vec_prefix *) vec)->num * elt_size);
+ ((struct vec_prefix *) vec)->num_ * elt_size);
}
return newvec;
}
diff --git a/gcc/vec.h b/gcc/vec.h
index 1922616fc13..441c9b5f791 100644
--- a/gcc/vec.h
+++ b/gcc/vec.h
@@ -25,26 +25,34 @@ along with GCC; see the file COPYING3. If not see
#include "statistics.h" /* For MEM_STAT_DECL. */
-/* The macros here implement a set of templated vector types and
- associated interfaces. These templates are implemented with
- macros, as we're not in C++ land. The interface functions are
- typesafe and use static inline functions, sometimes backed by
- out-of-line generic functions. The vectors are designed to
- interoperate with the GTY machinery.
-
- Because of the different behavior of structure objects, scalar
- objects and of pointers, there are three flavors, one for each of
- these variants. Both the structure object and pointer variants
- pass pointers to objects around -- in the former case the pointers
- are stored into the vector and in the latter case the pointers are
- dereferenced and the objects copied into the vector. The scalar
- object variant is suitable for int-like objects, and the vector
- elements are returned by value.
-
- There are both 'index' and 'iterate' accessors. The iterator
- returns a boolean iteration condition and updates the iteration
- variable passed by reference. Because the iterator will be
- inlined, the address-of can be optimized away.
+/* Templated vector type and associated interfaces.
+
+ The interface functions are typesafe and use inline functions,
+ sometimes backed by out-of-line generic functions. The vectors are
+ designed to interoperate with the GTY machinery.
+
+ FIXME - Remove the following compatibility notes after a handler
+ class for vec_t is implemented.
+
+ To preserve compatibility with the existing API, some functions
+ that manipulate vector elements implement two overloads: one taking
+ a pointer to the element and others that take a pointer to a
+ pointer to the element.
+
+ This used to be implemented with three different families of macros
+ and structures: structure objects, scalar objects and of pointers.
+ Both the structure object and pointer variants passed pointers to
+ objects around -- in the former case the pointers were stored into
+ the vector and in the latter case the pointers were dereferenced and
+ the objects copied into the vector. The scalar object variant was
+ suitable for int-like objects, and the vector elements were returned
+ by value.
+
+ There are both 'index' and 'iterate' accessors. The index accessor
+ is implemented by operator[]. The iterator returns a boolean
+ iteration condition and updates the iteration variable passed by
+ reference. Because the iterator will be inlined, the address-of
+ can be optimized away.
The vectors are implemented using the trailing array idiom, thus
they are not resizeable without changing the address of the vector
@@ -87,43 +95,27 @@ along with GCC; see the file COPYING3. If not see
When a vector type is defined, first a non-memory managed version
is created. You can then define either or both garbage collected
and heap allocated versions. The allocation mechanism is specified
- when the type is defined, and is therefore part of the type. If
- you need both gc'd and heap allocated versions, you still must have
- *exactly* one definition of the common non-memory managed base vector.
+ when the vector is allocated. This can occur via the VEC_alloc
+ call or one of the VEC_safe_* functions that add elements to a
+ vector. If the vector is NULL, it will be allocated using the
+ allocation strategy selected in the call. The valid allocations
+ are defined in enum vec_allocation_t.
If you need to directly manipulate a vector, then the 'address'
accessor will return the address of the start of the vector. Also
the 'space' predicate will tell you whether there is spare capacity
in the vector. You will not normally need to use these two functions.
- Vector types are defined using a DEF_VEC_{O,A,P,I}(TYPEDEF) macro, to
- get the non-memory allocation version, and then a
- DEF_VEC_ALLOC_{O,A,P,I}(TYPEDEF,ALLOC) macro to get memory managed
- vectors. Variables of vector type are declared using a
- VEC(TYPEDEF,ALLOC) macro. The ALLOC argument specifies the
- allocation strategy, and can be either 'gc' or 'heap' for garbage
- collected and heap allocated respectively. It can be 'none' to get
- a vector that must be explicitly allocated (for instance as a
- trailing array of another structure). The characters O, A, P and I
- indicate whether TYPEDEF is a pointer (P), object (O), atomic object
- (A) or integral (I) type. Be careful to pick the correct one, as
- you'll get an awkward and inefficient API if you use the wrong one or
- a even a crash if you pick the atomic object version when the object
- version should have been chosen instead. There is a check, which
- results in a compile-time warning, for the P and I versions, but there
- is no check for the O versions, as that is not possible in plain C.
- Due to the way GTY works, you must annotate any structures you wish to
- insert or reference from a vector with a GTY(()) tag. You need to do
- this even if you never declare the GC allocated variants.
+ Variables of vector type are of type vec_t<ETYPE> where ETYPE is
+ the type of the elements of the vector. Due to the way GTY works,
+ you must annotate any structures you wish to insert or reference
+ from a vector with a GTY(()) tag. You need to do this even if you
+ never use the GC allocated variants.
An example of their use would be,
- DEF_VEC_P(tree); // non-managed tree vector.
- DEF_VEC_ALLOC_P(tree,gc); // gc'd vector of tree pointers. This must
- // appear at file scope.
-
struct my_struct {
- VEC(tree,gc) *v; // A (pointer to) a vector of tree pointers.
+ vec_t<tree> *v; // A (pointer to) a vector of tree pointers.
};
struct my_struct *s;
@@ -136,9 +128,12 @@ along with GCC; see the file COPYING3. If not see
*/
#if ENABLE_CHECKING
-#define VEC_CHECK_INFO ,__FILE__,__LINE__,__FUNCTION__
-#define VEC_CHECK_DECL ,const char *file_,unsigned line_,const char *function_
-#define VEC_CHECK_PASS ,file_,line_,function_
+#define ALONE_VEC_CHECK_INFO __FILE__, __LINE__, __FUNCTION__
+#define VEC_CHECK_INFO , ALONE_VEC_CHECK_INFO
+#define ALONE_VEC_CHECK_DECL const char *file_, unsigned line_, const char *function_
+#define VEC_CHECK_DECL , ALONE_VEC_CHECK_DECL
+#define ALONE_VEC_CHECK_PASS file_, line_, function_
+#define VEC_CHECK_PASS , ALONE_VEC_CHECK_PASS
#define VEC_ASSERT(EXPR,OP,T,A) \
(void)((EXPR) ? 0 : (VEC_ASSERT_FAIL(OP,VEC(T,A)), 0))
@@ -147,8 +142,11 @@ extern void vec_assert_fail (const char *, const char * VEC_CHECK_DECL)
ATTRIBUTE_NORETURN;
#define VEC_ASSERT_FAIL(OP,VEC) vec_assert_fail (OP,#VEC VEC_CHECK_PASS)
#else
+#define ALONE_VEC_CHECK_INFO
#define VEC_CHECK_INFO
+#define ALONE_VEC_CHECK_DECL void
#define VEC_CHECK_DECL
+#define ALONE_VEC_CHECK_PASS
#define VEC_CHECK_PASS
#define VEC_ASSERT(EXPR,OP,T,A) (void)(EXPR)
#endif
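
A brief illustration (mirroring member declarations added further down, not new API) of why the ALONE_* spellings are introduced: with checking enabled the comma-prefixed forms can only follow an existing parameter, so members whose only arguments are the checking arguments need the stand-alone variants.

    T &last (ALONE_VEC_CHECK_DECL);           /* no other parameters */
    void truncate (unsigned VEC_CHECK_DECL);  /* appended after a real one */
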
@@ -159,27 +157,106 @@ enum vec_allocation_t { heap, gc, stack };
struct vec_prefix
{
- unsigned num;
- unsigned alloc;
+ unsigned num_;
+ unsigned alloc_;
};
/* Vector type, user visible. */
template<typename T>
struct GTY(()) vec_t
{
- vec_prefix prefix;
- T vec[1];
+ unsigned length (void) const;
+ bool empty (void) const;
+ T *address (void);
+ T &last (ALONE_VEC_CHECK_DECL);
+ const T &operator[] (unsigned) const;
+ T &operator[] (unsigned);
+ void embedded_init (int, int = 0);
+
+ template<enum vec_allocation_t A>
+ vec_t<T> *copy (ALONE_MEM_STAT_DECL);
+
+ bool space (int VEC_CHECK_DECL);
+ void splice (vec_t<T> * VEC_CHECK_DECL);
+ T &quick_push (T VEC_CHECK_DECL);
+ T *quick_push (const T * VEC_CHECK_DECL);
+ T &pop (ALONE_VEC_CHECK_DECL);
+ void truncate (unsigned VEC_CHECK_DECL);
+ void replace (unsigned, T VEC_CHECK_DECL);
+ void quick_insert (unsigned, T VEC_CHECK_DECL);
+ void quick_insert (unsigned, const T * VEC_CHECK_DECL);
+ void ordered_remove (unsigned VEC_CHECK_DECL);
+ void unordered_remove (unsigned VEC_CHECK_DECL);
+ void block_remove (unsigned, unsigned VEC_CHECK_DECL);
+
+ unsigned lower_bound (T, bool (*)(T, T)) const;
+ unsigned lower_bound (const T *, bool (*)(const T *, const T *)) const;
+
+ /* Class-static member functions. Some of these will become member
+ functions of a future handler class wrapping vec_t. */
+ static size_t embedded_size (int);
+
+ template<enum vec_allocation_t A>
+ static vec_t<T> *alloc (int MEM_STAT_DECL);
+
+ static vec_t<T> *alloc (int, vec_t<T> *);
+
+ template<enum vec_allocation_t A>
+ static void free (vec_t<T> **);
+
+ template<enum vec_allocation_t A>
+ static vec_t<T> *reserve_exact (vec_t<T> *, int MEM_STAT_DECL);
+
+ template<enum vec_allocation_t A>
+ static bool reserve_exact (vec_t<T> **, int VEC_CHECK_DECL MEM_STAT_DECL);
+
+ template<enum vec_allocation_t A>
+ static vec_t<T> *reserve (vec_t<T> *, int MEM_STAT_DECL);
+
+ template<enum vec_allocation_t A>
+ static bool reserve (vec_t<T> **, int VEC_CHECK_DECL MEM_STAT_DECL);
+
+ template<enum vec_allocation_t A>
+ static void safe_splice (vec_t<T> **, vec_t<T> * VEC_CHECK_DECL
+ MEM_STAT_DECL);
+
+ template<enum vec_allocation_t A>
+ static T &safe_push (vec_t<T> **, T VEC_CHECK_DECL MEM_STAT_DECL);
+
+ template<enum vec_allocation_t A>
+ static T *safe_push (vec_t<T> **, const T * VEC_CHECK_DECL MEM_STAT_DECL);
+
+ template<enum vec_allocation_t A>
+ static void safe_grow (vec_t<T> **, int VEC_CHECK_DECL MEM_STAT_DECL);
+
+ template<enum vec_allocation_t A>
+ static void safe_grow_cleared (vec_t<T> **, int VEC_CHECK_DECL MEM_STAT_DECL);
+
+ template<enum vec_allocation_t A>
+ static void safe_insert (vec_t<T> **, unsigned, T * VEC_CHECK_DECL
+ MEM_STAT_DECL);
+
+ template<enum vec_allocation_t A>
+ static void safe_insert (vec_t<T> **, unsigned, T obj VEC_CHECK_DECL
+ MEM_STAT_DECL);
+
+ static bool iterate (const vec_t<T> *, unsigned, T *);
+ static bool iterate (const vec_t<T> *, unsigned, T **);
+
+ vec_prefix prefix_;
+ T vec_[1];
};
+
/* Garbage collection support for vec_t. */
template<typename T>
void
gt_ggc_mx (vec_t<T> *v)
{
- extern void gt_ggc_mx (T&);
- for (unsigned i = 0; i < v->prefix.num; i++)
- gt_ggc_mx (v->vec[i]);
+ extern void gt_ggc_mx (T &);
+ for (unsigned i = 0; i < v->length (); i++)
+ gt_ggc_mx ((*v)[i]);
}
@@ -189,17 +266,17 @@ template<typename T>
void
gt_pch_nx (vec_t<T> *v)
{
- extern void gt_pch_nx (T&);
- for (unsigned i = 0; i < v->prefix.num; i++)
- gt_pch_nx (v->vec[i]);
+ extern void gt_pch_nx (T &);
+ for (unsigned i = 0; i < v->length (); i++)
+ gt_pch_nx ((*v)[i]);
}
template<typename T>
void
gt_pch_nx (vec_t<T *> *v, gt_pointer_operator op, void *cookie)
{
- for (unsigned i = 0; i < v->prefix.num; i++)
- op (&(v->vec[i]), cookie);
+ for (unsigned i = 0; i < v->length (); i++)
+ op (&((*v)[i]), cookie);
}
template<typename T>
@@ -207,13 +284,14 @@ void
gt_pch_nx (vec_t<T> *v, gt_pointer_operator op, void *cookie)
{
extern void gt_pch_nx (T *, gt_pointer_operator, void *);
- for (unsigned i = 0; i < v->prefix.num; i++)
- gt_pch_nx (&(v->vec[i]), op, cookie);
+ for (unsigned i = 0; i < v->length (); i++)
+ gt_pch_nx (&((*v)[i]), op, cookie);
}
-/* FIXME cxx-conversion. Remove these definitions and update all
- calling sites. */
+/* FIXME. Remove these definitions and update all calling sites after
+ the handler class for vec_t is implemented. */
+
/* Vector of integer-like object. */
#define DEF_VEC_I(T) struct vec_swallow_trailing_semi
#define DEF_VEC_ALLOC_I(T,A) struct vec_swallow_trailing_semi
@@ -270,138 +348,194 @@ extern void *vec_stack_o_reserve_exact (void *, int, size_t, size_t
MEM_STAT_DECL);
extern void vec_stack_free (void *);
-/* Reallocate an array of elements with prefix. */
-template<typename T, enum vec_allocation_t A>
-extern vec_t<T> *vec_reserve (vec_t<T> *, int MEM_STAT_DECL);
-
-template<typename T, enum vec_allocation_t A>
-extern vec_t<T> *vec_reserve_exact (vec_t<T> *, int MEM_STAT_DECL);
-
extern void dump_vec_loc_statistics (void);
extern void ggc_free (void *);
extern void vec_heap_free (void *);
-/* Macros to invoke API calls. A single macro works for both pointer
- and object vectors, but the argument and return types might well be
- different. In each macro, T is the typedef of the vector elements,
- and A is the allocation strategy. The allocation strategy is only
- present when it is required. Some of these macros pass the vector,
- V, by reference (by taking its address), this is noted in the
- descriptions. */
+/* API compatibility macros (to be removed). */
+#define VEC_length(T,V) \
+ ((V) ? (V)->length () : 0)
-/* Length of vector
- unsigned VEC_T_length(const VEC(T) *v);
+#define VEC_empty(T,V) \
+ ((V) ? (V)->empty () : true)
- Return the number of active elements in V. V can be NULL, in which
- case zero is returned. */
+#define VEC_address(T,V) \
+ vec_address<T> (V)
-#define VEC_length(T,V) (VEC_length_1<T> (V))
+/* FIXME. For now, we need to continue expanding VEC_address into a
+ function call. Otherwise, the warning machinery for -Wnonnull gets
+ confused thinking that VEC_address may return null in calls to
+ memcpy and qsort. This will disappear once vec_address becomes
+ a member function for a handler class wrapping vec_t. */
template<typename T>
-static inline unsigned
-VEC_length_1 (const vec_t<T> *vec_)
+static inline T *
+vec_address (vec_t<T> *vec)
{
- return vec_ ? vec_->prefix.num : 0;
+ return vec ? vec->address() : NULL;
}
+#define VEC_last(T,V) \
+ ((V)->last (ALONE_VEC_CHECK_INFO))
-/* Check if vector is empty
- int VEC_T_empty(const VEC(T) *v);
+#define VEC_index(T,V,I) \
+ ((*(V))[I])
- Return nonzero if V is an empty vector (or V is NULL), zero otherwise. */
+#define VEC_iterate(T,V,I,P) \
+ (vec_t<T>::iterate(V, I, &(P)))
-#define VEC_empty(T,V) (VEC_empty_1<T> (V))
+#define VEC_embedded_size(T,N) \
+ (vec_t<T>::embedded_size (N))
-template<typename T>
-static inline bool
-VEC_empty_1 (const vec_t<T> *vec_)
-{
- return VEC_length (T, vec_) == 0;
-}
+#define VEC_embedded_init(T,V,N) \
+ ((V)->embedded_init (N))
+
+#define VEC_free(T,A,V) \
+ (vec_t<T>::free<A> (&(V)))
+
+#define VEC_copy(T,A,V) \
+ ((V)->copy<A> (ALONE_MEM_STAT_INFO))
+
+#define VEC_space(T,V,R) \
+ ((V) ? (V)->space (R VEC_CHECK_INFO) : (R) == 0)
+
+#define VEC_reserve(T,A,V,R) \
+ (vec_t<T>::reserve<A> (&(V), (int)(R) VEC_CHECK_INFO MEM_STAT_INFO))
+
+#define VEC_reserve_exact(T,A,V,R) \
+ (vec_t<T>::reserve_exact<A> (&(V), R VEC_CHECK_INFO MEM_STAT_INFO))
+
+#define VEC_splice(T,DST,SRC) \
+ (DST)->splice (SRC VEC_CHECK_INFO)
+
+#define VEC_safe_splice(T,A,DST,SRC) \
+ vec_t<T>::safe_splice<A> (&(DST), SRC VEC_CHECK_INFO MEM_STAT_INFO)
+
+#define VEC_quick_push(T,V,O) \
+ ((V)->quick_push (O VEC_CHECK_INFO))
+
+#define VEC_safe_push(T,A,V,O) \
+ (vec_t<T>::safe_push<A> (&(V), O VEC_CHECK_INFO MEM_STAT_INFO))
+
+#define VEC_pop(T,V) \
+ ((V)->pop (ALONE_VEC_CHECK_INFO))
+
+#define VEC_truncate(T,V,I) \
+ (V \
+ ? (V)->truncate ((unsigned)(I) VEC_CHECK_INFO) \
+ : gcc_assert ((I) == 0))
+
+#define VEC_safe_grow(T,A,V,I) \
+ (vec_t<T>::safe_grow<A> (&(V), (int)(I) VEC_CHECK_INFO MEM_STAT_INFO))
+
+#define VEC_safe_grow_cleared(T,A,V,I) \
+ (vec_t<T>::safe_grow_cleared<A> (&(V), (int)(I) \
+ VEC_CHECK_INFO MEM_STAT_INFO))
+
+#define VEC_replace(T,V,I,O) \
+ ((V)->replace ((unsigned)(I), O VEC_CHECK_INFO))
+
+#define VEC_quick_insert(T,V,I,O) \
+ ((V)->quick_insert (I,O VEC_CHECK_INFO))
+
+#define VEC_safe_insert(T,A,V,I,O) \
+ (vec_t<T>::safe_insert<A> (&(V), I, O VEC_CHECK_INFO MEM_STAT_INFO))
+#define VEC_ordered_remove(T,V,I) \
+ ((V)->ordered_remove (I VEC_CHECK_INFO))
-/* Get the address of the array of elements
- T *VEC_T_address (VEC(T) v)
+#define VEC_unordered_remove(T,V,I) \
+ ((V)->unordered_remove (I VEC_CHECK_INFO))
- If you need to directly manipulate the array (for instance, you
- want to feed it to qsort), use this accessor. */
+#define VEC_block_remove(T,V,I,L) \
+ ((V)->block_remove (I, L VEC_CHECK_INFO))
-#define VEC_address(T,V) (VEC_address_1<T> (V))
+#define VEC_lower_bound(T,V,O,LT) \
+ ((V)->lower_bound (O, LT))
+
+
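
A minimal usage sketch of the compatibility macros above, written against the definitions in this header; the element type int and the consumer use () are hypothetical, and heap allocation is assumed.

    vec_t<int> *stack = NULL;
    VEC_safe_push (int, heap, stack, 1);   /* allocates on the first push */
    VEC_safe_push (int, heap, stack, 2);
    while (!VEC_empty (int, stack))
      use (VEC_pop (int, stack));          /* yields 2, then 1 */
    VEC_free (int, heap, stack);           /* frees and nulls the pointer */
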
+/* Return the number of active elements in this vector. */
template<typename T>
-static inline T *
-VEC_address_1 (vec_t<T> *vec_)
+inline unsigned
+vec_t<T>::length (void) const
{
- return vec_ ? vec_->vec : 0;
+ return prefix_.num_;
}
-/* Get the final element of the vector.
- T VEC_T_last(VEC(T) *v); // Integer
- T VEC_T_last(VEC(T) *v); // Pointer
- T *VEC_T_last(VEC(T) *v); // Object
+/* Return true if this vector has no active elements. */
+
+template<typename T>
+inline bool
+vec_t<T>::empty (void) const
+{
+ return length () == 0;
+}
- Return the final element. V must not be empty. */
-#define VEC_last(T,V) (VEC_last_1<T> (V VEC_CHECK_INFO))
+/* Return the address of the array of elements. If you need to
+ directly manipulate the array (for instance, you want to feed it
+ to qsort), use this accessor. */
template<typename T>
-static inline T&
-VEC_last_1 (vec_t<T> *vec_ VEC_CHECK_DECL)
+inline T *
+vec_t<T>::address (void)
{
- VEC_ASSERT (vec_ && vec_->prefix.num, "last", T, base);
- return vec_->vec[vec_->prefix.num - 1];
+ return vec_;
}
-/* Index into vector
- T VEC_T_index(VEC(T) *v, unsigned ix); // Integer
- T VEC_T_index(VEC(T) *v, unsigned ix); // Pointer
- T *VEC_T_index(VEC(T) *v, unsigned ix); // Object
+/* Get the final element of the vector, which must not be empty. */
+
+template<typename T>
+T &
+vec_t<T>::last (ALONE_VEC_CHECK_DECL)
+{
+ VEC_ASSERT (prefix_.num_, "last", T, base);
+ return (*this)[prefix_.num_ - 1];
+}
- Return the IX'th element. IX must be in the domain of V. */
-#define VEC_index(T,V,I) (VEC_index_1<T> (V, I VEC_CHECK_INFO))
+/* Index into vector. Return the IX'th element. IX must be in the
+ domain of the vector. */
template<typename T>
-static inline T&
-VEC_index_1 (vec_t<T> *vec_, unsigned ix_ VEC_CHECK_DECL)
+const T &
+vec_t<T>::operator[] (unsigned ix) const
{
- VEC_ASSERT (vec_ && ix_ < vec_->prefix.num, "index", T, base);
- return vec_->vec[ix_];
+ gcc_assert (ix < prefix_.num_);
+ return vec_[ix];
}
template<typename T>
-static inline const T&
-VEC_index_1 (const vec_t<T> *vec_, unsigned ix_ VEC_CHECK_DECL)
+T &
+vec_t<T>::operator[] (unsigned ix)
{
- VEC_ASSERT (vec_ && ix_ < vec_->prefix.num, "index", T, base);
- return vec_->vec[ix_];
+ gcc_assert (ix < prefix_.num_);
+ return vec_[ix];
}
-/* Iterate over vector
- int VEC_T_iterate(VEC(T) *v, unsigned ix, T &ptr); // Integer
- int VEC_T_iterate(VEC(T) *v, unsigned ix, T &ptr); // Pointer
- int VEC_T_iterate(VEC(T) *v, unsigned ix, T *&ptr); // Object
-
- Return iteration condition and update PTR to point to the IX'th
- element. At the end of iteration, sets PTR to NULL. Use this to
- iterate over the elements of a vector as follows,
+/* Return iteration condition and update PTR to point to the IX'th
+ element of VEC. Use this to iterate over the elements of a vector
+ as follows,
- for (ix = 0; VEC_iterate(T,v,ix,ptr); ix++)
- continue; */
-
-#define VEC_iterate(T,V,I,P) (VEC_iterate_1<T> (V, I, &(P)))
+ for (ix = 0; vec_t<T>::iterate(v, ix, &ptr); ix++)
+ continue;
+
+ FIXME. This is a static member function because if VEC is NULL,
+ PTR should be initialized to NULL. This will become a regular
+ member function of the handler class. */
template<typename T>
-static inline bool
-VEC_iterate_1 (const vec_t<T> *vec_, unsigned ix_, T *ptr)
+bool
+vec_t<T>::iterate (const vec_t<T> *vec, unsigned ix, T *ptr)
{
- if (vec_ && ix_ < vec_->prefix.num)
+ if (vec && ix < vec->prefix_.num_)
{
- *ptr = vec_->vec[ix_];
+ *ptr = vec->vec_[ix];
return true;
}
else
@@ -411,13 +545,24 @@ VEC_iterate_1 (const vec_t<T> *vec_, unsigned ix_, T *ptr)
}
}
+
+/* Return iteration condition and update *PTR to point to the
+ IX'th element of VEC. Use this to iterate over the elements of a
+ vector as follows,
+
+ for (ix = 0; v->iterate(ix, &ptr); ix++)
+ continue;
+
+ This variant is for vectors of objects. FIXME, to be removed
+ once the distinction between vec_t<T> and vec_t<T *> disappears. */
+
template<typename T>
-static inline bool
-VEC_iterate_1 (vec_t<T> *vec_, unsigned ix_, T **ptr)
+bool
+vec_t<T>::iterate (const vec_t<T> *vec, unsigned ix, T **ptr)
{
- if (vec_ && ix_ < vec_->prefix.num)
+ if (vec && ix < vec->prefix_.num_)
{
- *ptr = &vec_->vec[ix_];
+ *ptr = CONST_CAST (T *, &vec->vec_[ix]);
return true;
}
else
@@ -427,9 +572,10 @@ VEC_iterate_1 (vec_t<T> *vec_, unsigned ix_, T **ptr)
}
}
+
/* Convenience macro for forward iteration. */
-#define FOR_EACH_VEC_ELT(T, V, I, P) \
+#define FOR_EACH_VEC_ELT(T, V, I, P) \
for (I = 0; VEC_iterate (T, (V), (I), (P)); ++(I))
/* Likewise, but start from FROM rather than 0. */
@@ -439,640 +585,517 @@ VEC_iterate_1 (vec_t<T> *vec_, unsigned ix_, T **ptr)
/* Convenience macro for reverse iteration. */
-#define FOR_EACH_VEC_ELT_REVERSE(T,V,I,P) \
- for (I = VEC_length (T, (V)) - 1; \
- VEC_iterate (T, (V), (I), (P)); \
+#define FOR_EACH_VEC_ELT_REVERSE(T, V, I, P) \
+ for (I = VEC_length (T, (V)) - 1; \
+ VEC_iterate (T, (V), (I), (P)); \
(I)--)
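
A short usage sketch of the forward iteration macro above; the vector v and the consumer process () are hypothetical. For a vec_t<int>, the loop variable receives each element by value.

    unsigned ix;
    int elt;
    FOR_EACH_VEC_ELT (int, v, ix, elt)
      process (elt);
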
-/* Use these to determine the required size and initialization of a
- vector embedded within another structure (as the final member).
+/* Return the number of bytes needed to embed an instance of vec_t inside
+ another data structure.
- size_t VEC_T_embedded_size(int reserve);
- void VEC_T_embedded_init(VEC(T) *v, int reserve);
+ Use these methods to determine the required size and initialization
+ of a vector V of type T embedded within another structure (as the
+ final member):
- These allow the caller to perform the memory allocation. */
+ size_t vec_t<T>::embedded_size (int reserve);
+ void v->embedded_init (int reserve, int active);
-#define VEC_embedded_size(T,N) (VEC_embedded_size_1<T> (N))
+ These allow the caller to perform the memory allocation. */
template<typename T>
-static inline size_t
-VEC_embedded_size_1 (int alloc_)
+size_t
+vec_t<T>::embedded_size (int nelems)
{
- return offsetof (vec_t<T>, vec) + alloc_ * sizeof (T);
+ return offsetof (vec_t<T>, vec_) + nelems * sizeof (T);
}
-#define VEC_embedded_init(T,O,N) (VEC_embedded_init_1<T> (O, N))
+
+/* Initialize the vector to contain room for NELEMS elements and
+ ACTIVE active elements. */
template<typename T>
-static inline void
-VEC_embedded_init_1 (vec_t<T> *vec_, int alloc_)
+void
+vec_t<T>::embedded_init (int nelems, int active)
{
- vec_->prefix.num = 0;
- vec_->prefix.alloc = alloc_;
+ prefix_.num_ = active;
+ prefix_.alloc_ = nelems;
}
-/* Allocate new vector.
- VEC(T,A) *VEC_T_A_alloc(int reserve);
-
- Allocate a new vector with space for RESERVE objects. If RESERVE
+/* Allocate a new vector with space for RESERVE objects. If RESERVE
is zero, NO vector is created.
+ Note that this allocator must always be a macro:
+
We support a vector which starts out with space on the stack and
switches to heap space when forced to reallocate. This works a
- little differently. In the case of stack vectors, VEC_alloc will
- expand to a call to VEC_alloc_1 that calls XALLOCAVAR to request the
+ little differently. In the case of stack vectors, vec_alloc will
+ expand to a call to vec_alloc_1 that calls XALLOCAVAR to request the
initial allocation. This uses alloca to get the initial space.
Since alloca can not be usefully called in an inline function,
- VEC_alloc must always be a macro.
-
- Only the initial allocation will be made using alloca, so pass a
- reasonable estimate that doesn't use too much stack space; don't
- pass zero. Don't return a VEC(TYPE,stack) vector from the function
- which allocated it. */
-
-#define VEC_alloc(T,A,N) \
- ((A == stack) \
- ? VEC_alloc_1 (N, \
- XALLOCAVAR (vec_t<T>, \
- VEC_embedded_size_1<T> (N))) \
- : VEC_alloc_1<T, A> (N MEM_STAT_INFO))
-
-template<typename T, enum vec_allocation_t A>
-static inline vec_t<T> *
-VEC_alloc_1 (int alloc_ MEM_STAT_DECL)
+ vec_alloc must always be a macro.
+
+ Important limitations of stack vectors:
+
+ - Only the initial allocation will be made using alloca, so pass a
+ reasonable estimate that doesn't use too much stack space; don't
+ pass zero.
+
+ - Don't return a stack-allocated vector from the function which
+ allocated it. */
+
+#define VEC_alloc(T,A,N) \
+ ((A == stack) \
+ ? vec_t<T>::alloc (N, XALLOCAVAR (vec_t<T>, vec_t<T>::embedded_size (N)))\
+ : vec_t<T>::alloc<A> (N MEM_STAT_INFO))
+
+template<typename T>
+template<enum vec_allocation_t A>
+vec_t<T> *
+vec_t<T>::alloc (int nelems MEM_STAT_DECL)
{
- return vec_reserve_exact<T, A> (NULL, alloc_ PASS_MEM_STAT);
+ return reserve_exact<A> ((vec_t<T> *) NULL, nelems PASS_MEM_STAT);
}
template<typename T>
-static inline vec_t<T> *
-VEC_alloc_1 (int alloc_, vec_t<T> *space)
+vec_t<T> *
+vec_t<T>::alloc (int nelems, vec_t<T> *space)
{
- return (vec_t<T> *) vec_stack_p_reserve_exact_1 (alloc_, space);
+ return static_cast <vec_t<T> *> (vec_stack_p_reserve_exact_1 (nelems, space));
}
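
A minimal sketch (sizes and the surrounding function are hypothetical) of the stack-vector pattern described above: the initial storage comes from alloca via the VEC_alloc macro, later growth silently moves the vector to the heap, and the vector must not escape the function that allocated it.

    vec_t<int> *scratch = VEC_alloc (int, stack, 16); /* alloca'd here */
    for (int i = 0; i < 100; i++)
      VEC_safe_push (int, stack, scratch, i);  /* may migrate to the heap */
    /* ... use scratch locally; do not return it ... */
    VEC_free (int, stack, scratch);
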
-/* Free a vector.
- void VEC_T_A_free(VEC(T,A) *&);
-
- Free a vector and set it to NULL. */
-
-#define VEC_free(T,A,V) (VEC_free_1<T, A> (&V))
+/* Free vector *V and set it to NULL. */
-template<typename T, enum vec_allocation_t A>
-static inline void
-VEC_free_1 (vec_t<T> **vec_)
+template<typename T>
+template<enum vec_allocation_t A>
+void
+vec_t<T>::free (vec_t<T> **v)
{
- if (*vec_)
+ if (*v)
{
if (A == heap)
- vec_heap_free (*vec_);
+ vec_heap_free (*v);
else if (A == gc)
- ggc_free (*vec_);
+ ggc_free (*v);
else if (A == stack)
- vec_stack_free (*vec_);
+ vec_stack_free (*v);
}
- *vec_ = NULL;
+ *v = NULL;
}
-/* Copy a vector.
- VEC(T,A) *VEC_T_A_copy(VEC(T) *);
-
- Copy the live elements of a vector into a new vector. The new and
- old vectors need not be allocated by the same mechanism. */
+/* Return a copy of this vector. The new and old vectors need not be
+ allocated by the same mechanism. */
-#define VEC_copy(T,A,V) (VEC_copy_1<T, A> (V MEM_STAT_INFO))
-
-template<typename T, enum vec_allocation_t A>
-static inline vec_t<T> *
-VEC_copy_1 (vec_t<T> *vec_ MEM_STAT_DECL)
+template<typename T>
+template<enum vec_allocation_t A>
+vec_t<T> *
+vec_t<T>::copy (ALONE_MEM_STAT_DECL)
{
- size_t len_ = vec_ ? vec_->prefix.num : 0;
- vec_t<T> *new_vec_ = NULL;
+ unsigned len = VEC_length (T, this);
+ vec_t<T> *new_vec = NULL;
- if (len_)
+ if (len)
{
- new_vec_ = vec_reserve_exact<T, A> (NULL, len_ PASS_MEM_STAT);
- new_vec_->prefix.num = len_;
- memcpy (new_vec_->vec, vec_->vec, sizeof (T) * len_);
+ new_vec = reserve_exact<A> (static_cast<vec_t<T> *> (NULL),
+ len PASS_MEM_STAT);
+ new_vec->embedded_init (len, len);
+ memcpy (new_vec->address (), vec_, sizeof (T) * len);
}
- return new_vec_;
-}
-
-/* Determine if a vector has additional capacity.
+ return new_vec;
+}
- int VEC_T_space (VEC(T) *v,int reserve)
- If V has space for RESERVE additional entries, return nonzero. You
- usually only need to use this if you are doing your own vector
- reallocation, for instance on an embedded vector. This returns
- nonzero in exactly the same circumstances that VEC_T_reserve
+/* If this vector has space for RESERVE additional entries, return
+ true. You usually only need to use this if you are doing your
+ own vector reallocation, for instance on an embedded vector. This
+ returns true in exactly the same circumstances that vec_reserve
will. */
-#define VEC_space(T,V,R) (VEC_space_1<T> (V, R VEC_CHECK_INFO))
-
template<typename T>
-static inline int
-VEC_space_1 (vec_t<T> *vec_, int alloc_ VEC_CHECK_DECL)
+bool
+vec_t<T>::space (int nelems VEC_CHECK_DECL)
{
- VEC_ASSERT (alloc_ >= 0, "space", T, base);
- return vec_
- ? vec_->prefix.alloc - vec_->prefix.num >= (unsigned)alloc_
- : !alloc_;
+ VEC_ASSERT (nelems >= 0, "space", T, base);
+ return prefix_.alloc_ - prefix_.num_ >= static_cast <unsigned> (nelems);
}
-/* Reserve space.
- int VEC_T_A_reserve(VEC(T,A) *&v, int reserve);
-
- Ensure that V has at least RESERVE slots available. This will
- create additional headroom. Note this can cause V to be
- reallocated. Returns nonzero iff reallocation actually
- occurred. */
+/* Ensure that the vector **VEC has at least RESERVE slots available. This
+ will create additional headroom. Note this can cause **VEC to
+ be reallocated. Returns true iff reallocation actually occurred. */
-#define VEC_reserve(T,A,V,R) \
- (VEC_reserve_1<T, A> (&(V), (int)(R) VEC_CHECK_INFO MEM_STAT_INFO))
-
-template<typename T, enum vec_allocation_t A>
-static inline int
-VEC_reserve_1 (vec_t<T> **vec_, int alloc_ VEC_CHECK_DECL MEM_STAT_DECL)
+template<typename T>
+template<enum vec_allocation_t A>
+bool
+vec_t<T>::reserve (vec_t<T> **vec, int nelems VEC_CHECK_DECL MEM_STAT_DECL)
{
- int extend = !VEC_space_1 (*vec_, alloc_ VEC_CHECK_PASS);
+ bool extend = (*vec) ? !(*vec)->space (nelems VEC_CHECK_PASS) : nelems != 0;
if (extend)
- *vec_ = vec_reserve<T, A> (*vec_, alloc_ PASS_MEM_STAT);
+ *vec = reserve<A> (*vec, nelems PASS_MEM_STAT);
return extend;
}
-/* Reserve space exactly.
- int VEC_T_A_reserve_exact(VEC(T,A) *&v, int reserve);
-
- Ensure that V has at least RESERVE slots available. This will not
- create additional headroom. Note this can cause V to be
- reallocated. Returns nonzero iff reallocation actually
- occurred. */
+/* Ensure that **VEC has at least NELEMS slots available. This will not
+ create additional headroom. Note this can cause VEC to be
+ reallocated. Returns true iff reallocation actually occurred. */
-#define VEC_reserve_exact(T,A,V,R) \
- (VEC_reserve_exact_1<T, A> (&(V), R VEC_CHECK_INFO MEM_STAT_INFO))
-
-template<typename T, enum vec_allocation_t A>
-static inline int
-VEC_reserve_exact_1 (vec_t<T> **vec_, int alloc_ VEC_CHECK_DECL MEM_STAT_DECL)
+template<typename T>
+template<enum vec_allocation_t A>
+bool
+vec_t<T>::reserve_exact (vec_t<T> **vec, int nelems VEC_CHECK_DECL
+ MEM_STAT_DECL)
{
- int extend = !VEC_space_1 (*vec_, alloc_ VEC_CHECK_PASS);
+ bool extend = (*vec) ? !(*vec)->space (nelems VEC_CHECK_PASS) : nelems != 0;
if (extend)
- *vec_ = vec_reserve_exact<T, A> (*vec_, alloc_ PASS_MEM_STAT);
+ *vec = reserve_exact<A> (*vec, nelems PASS_MEM_STAT);
return extend;
}
-/* Copy elements with no reallocation
- void VEC_T_splice (VEC(T) *dst, VEC(T) *src); // Integer
- void VEC_T_splice (VEC(T) *dst, VEC(T) *src); // Pointer
- void VEC_T_splice (VEC(T) *dst, VEC(T) *src); // Object
-
- Copy the elements in SRC to the end of DST as if by memcpy. DST and
- SRC need not be allocated with the same mechanism, although they most
- often will be. DST is assumed to have sufficient headroom
- available. */
-
-#define VEC_splice(T,DST,SRC) (VEC_splice_1<T> (DST, SRC VEC_CHECK_INFO))
+/* Copy the elements from SRC to the end of this vector as if by memcpy.
+ SRC and this vector need not be allocated with the same mechanism,
+ although they most often will be. This vector is assumed to have
+ sufficient headroom available. */
template<typename T>
-static inline void
-VEC_splice_1 (vec_t<T> *dst_, vec_t<T> *src_ VEC_CHECK_DECL)
+void
+vec_t<T>::splice (vec_t<T> *src VEC_CHECK_DECL)
{
- if (src_)
+ if (src)
{
- unsigned len_ = src_->prefix.num;
- VEC_ASSERT (dst_->prefix.num + len_ <= dst_->prefix.alloc, "splice",
- T, base);
-
- memcpy (&dst_->vec[dst_->prefix.num], &src_->vec[0], len_ * sizeof (T));
- dst_->prefix.num += len_;
+ unsigned len = VEC_length (T, src);
+ VEC_ASSERT (VEC_length (T, this) + len <= prefix_.alloc_, "splice", T,
+ base);
+ memcpy (address () + VEC_length (T, this),
+ src->address (),
+ len * sizeof (T));
+ prefix_.num_ += len;
}
}
-/* Copy elements with reallocation
- void VEC_T_safe_splice (VEC(T,A) *&dst, VEC(T) *src); // Integer
- void VEC_T_safe_splice (VEC(T,A) *&dst, VEC(T) *src); // Pointer
- void VEC_T_safe_splice (VEC(T,A) *&dst, VEC(T) *src); // Object
-
- Copy the elements in SRC to the end of DST as if by memcpy. DST and
+/* Copy the elements in SRC to the end of DST as if by memcpy. DST and
SRC need not be allocated with the same mechanism, although they most
often will be. DST need not have sufficient headroom and will be
reallocated if needed. */
-#define VEC_safe_splice(T,A,DST,SRC) \
- (VEC_safe_splice_1<T, A> (&(DST), SRC VEC_CHECK_INFO MEM_STAT_INFO))
-
-template<typename T, enum vec_allocation_t A>
-static inline void
-VEC_safe_splice_1 (vec_t<T> **dst_, vec_t<T> *src_ VEC_CHECK_DECL MEM_STAT_DECL)
+template<typename T>
+template<enum vec_allocation_t A>
+void
+vec_t<T>::safe_splice (vec_t<T> **dst, vec_t<T> *src VEC_CHECK_DECL
+ MEM_STAT_DECL)
{
- if (src_)
+ if (src)
{
- VEC_reserve_exact_1<T, A> (dst_, src_->prefix.num
- VEC_CHECK_PASS MEM_STAT_INFO);
-
- VEC_splice_1 (*dst_, src_ VEC_CHECK_PASS);
+ reserve_exact<A> (dst, VEC_length (T, src) VEC_CHECK_PASS MEM_STAT_INFO);
+ (*dst)->splice (src VEC_CHECK_PASS);
}
}
-/* Push object with no reallocation
- T *VEC_T_quick_push (VEC(T) *v, T obj); // Integer
- T *VEC_T_quick_push (VEC(T) *v, T obj); // Pointer
- T *VEC_T_quick_push (VEC(T) *v, T *obj); // Object
-
- Push a new element onto the end, returns a pointer to the slot
- filled in. For object vectors, the new value can be NULL, in which
- case NO initialization is performed. There must
- be sufficient space in the vector. */
-
-#define VEC_quick_push(T,V,O) (VEC_quick_push_1<T> (V, O VEC_CHECK_INFO))
+/* Push OBJ (a new element) onto the end, returns a reference to the slot
+ filled in. There must be sufficient space in the vector. */
template<typename T>
-static inline T &
-VEC_quick_push_1 (vec_t<T> *vec_, T obj_ VEC_CHECK_DECL)
+T &
+vec_t<T>::quick_push (T obj VEC_CHECK_DECL)
{
- VEC_ASSERT (vec_->prefix.num < vec_->prefix.alloc, "push", T, base);
- vec_->vec[vec_->prefix.num] = obj_;
- T &val_ = vec_->vec[vec_->prefix.num];
- vec_->prefix.num++;
- return val_;
+ VEC_ASSERT (prefix_.num_ < prefix_.alloc_, "push", T, base);
+ vec_[prefix_.num_] = obj;
+ T &val = vec_[prefix_.num_];
+ prefix_.num_++;
+ return val;
}
+
+/* Push PTR (a new pointer to an element) onto the end, returns a
+ pointer to the slot filled in. The new value can be NULL, in which
+ case NO initialization is performed. There must be sufficient
+ space in the vector. */
+
template<typename T>
-static inline T *
-VEC_quick_push_1 (vec_t<T> *vec_, const T *ptr_ VEC_CHECK_DECL)
+T *
+vec_t<T>::quick_push (const T *ptr VEC_CHECK_DECL)
{
- T *slot_;
- VEC_ASSERT (vec_->prefix.num < vec_->prefix.alloc, "push", T, base);
- slot_ = &vec_->vec[vec_->prefix.num++];
- if (ptr_)
- *slot_ = *ptr_;
- return slot_;
+ VEC_ASSERT (prefix_.num_ < prefix_.alloc_, "push", T, base);
+ T *slot = &vec_[prefix_.num_++];
+ if (ptr)
+ *slot = *ptr;
+ return slot;
}
-/* Push object with reallocation
- T *VEC_T_A_safe_push (VEC(T,A) *&v, T obj); // Integer
- T *VEC_T_A_safe_push (VEC(T,A) *&v, T obj); // Pointer
- T *VEC_T_A_safe_push (VEC(T,A) *&v, T *obj); // Object
-
- Push a new element onto the end, returns a pointer to the slot
- filled in. For object vectors, the new value can be NULL, in which
- case NO initialization is performed. Reallocates V, if needed. */
+/* Push a new element OBJ onto the end of VEC. Returns a reference to
+ the slot filled in. Reallocates VEC, if needed. */
-#define VEC_safe_push(T,A,V,O) \
- (VEC_safe_push_1<T, A> (&(V), O VEC_CHECK_INFO MEM_STAT_INFO))
-
-template<typename T, enum vec_allocation_t A>
-static inline T &
-VEC_safe_push_1 (vec_t<T> **vec_, T obj_ VEC_CHECK_DECL MEM_STAT_DECL)
+template<typename T>
+template<enum vec_allocation_t A>
+T &
+vec_t<T>::safe_push (vec_t<T> **vec, T obj VEC_CHECK_DECL MEM_STAT_DECL)
{
- VEC_reserve_1<T, A> (vec_, 1 VEC_CHECK_PASS PASS_MEM_STAT);
- return VEC_quick_push_1 (*vec_, obj_ VEC_CHECK_PASS);
+ reserve<A> (vec, 1 VEC_CHECK_PASS PASS_MEM_STAT);
+ return (*vec)->quick_push (obj VEC_CHECK_PASS);
}
-template<typename T, enum vec_allocation_t A>
-static inline T *
-VEC_safe_push_1 (vec_t<T> **vec_, const T *ptr_ VEC_CHECK_DECL MEM_STAT_DECL)
+
+/* Push a pointer PTR to a new element onto the end of VEC. Returns a
+ pointer to the slot filled in. For object vectors, the new value
+ can be NULL, in which case NO initialization is performed.
+ Reallocates VEC, if needed. */
+
+template<typename T>
+template<enum vec_allocation_t A>
+T *
+vec_t<T>::safe_push (vec_t<T> **vec, const T *ptr VEC_CHECK_DECL MEM_STAT_DECL)
{
- VEC_reserve_1<T, A> (vec_, 1 VEC_CHECK_PASS PASS_MEM_STAT);
- return VEC_quick_push_1 (*vec_, ptr_ VEC_CHECK_PASS);
+ reserve<A> (vec, 1 VEC_CHECK_PASS PASS_MEM_STAT);
+ return (*vec)->quick_push (ptr VEC_CHECK_PASS);
}
-/* Pop element off end
- T VEC_T_pop (VEC(T) *v); // Integer
- T VEC_T_pop (VEC(T) *v); // Pointer
- void VEC_T_pop (VEC(T) *v); // Object
+/* Pop and return the last element off the end of the vector. */
- Pop the last element off the end. Returns the element popped, for
- pointer vectors. */
-
-#define VEC_pop(T,V) (VEC_pop_1<T> (V VEC_CHECK_INFO))
template<typename T>
-static inline T&
-VEC_pop_1 (vec_t<T> *vec_ VEC_CHECK_DECL)
+T &
+vec_t<T>::pop (ALONE_VEC_CHECK_DECL)
{
- VEC_ASSERT (vec_->prefix.num, "pop", T, base);
- return vec_->vec[--vec_->prefix.num];
+ VEC_ASSERT (prefix_.num_, "pop", T, base);
+ return vec_[--prefix_.num_];
}
-/* Truncate to specific length
- void VEC_T_truncate (VEC(T) *v, unsigned len);
-
- Set the length as specified. The new length must be less than or
- equal to the current length. This is an O(1) operation. */
-
-#define VEC_truncate(T,V,I) \
- (VEC_truncate_1<T> (V, (unsigned)(I) VEC_CHECK_INFO))
+/* Set the length of the vector to LEN. The new length must be less
+ than or equal to the current length. This is an O(1) operation. */
template<typename T>
-static inline void
-VEC_truncate_1 (vec_t<T> *vec_, unsigned size_ VEC_CHECK_DECL)
+void
+vec_t<T>::truncate (unsigned size VEC_CHECK_DECL)
{
- VEC_ASSERT (vec_ ? vec_->prefix.num >= size_ : !size_, "truncate", T, base);
- if (vec_)
- vec_->prefix.num = size_;
+ VEC_ASSERT (prefix_.num_ >= size, "truncate", T, base);
+ prefix_.num_ = size;
}
-/* Grow to a specific length.
- void VEC_T_A_safe_grow (VEC(T,A) *&v, int len);
-
- Grow the vector to a specific length. The LEN must be as
+/* Grow the vector VEC to a specific length. The LEN must be as
long or longer than the current length. The new elements are
uninitialized. */
-#define VEC_safe_grow(T,A,V,I) \
- (VEC_safe_grow_1<T, A> (&(V), (int)(I) VEC_CHECK_INFO MEM_STAT_INFO))
-
-template<typename T, enum vec_allocation_t A>
-static inline void
-VEC_safe_grow_1 (vec_t<T> **vec_, int size_ VEC_CHECK_DECL MEM_STAT_DECL)
+template<typename T>
+template<enum vec_allocation_t A>
+void
+vec_t<T>::safe_grow (vec_t<T> **vec, int size VEC_CHECK_DECL MEM_STAT_DECL)
{
- VEC_ASSERT (size_ >= 0 && VEC_length (T, *vec_) <= (unsigned)size_,
+ VEC_ASSERT (size >= 0 && VEC_length (T, *vec) <= (unsigned)size,
"grow", T, A);
- VEC_reserve_exact_1<T, A> (vec_,
- size_ - (int)(*vec_ ? (*vec_)->prefix.num : 0)
- VEC_CHECK_PASS PASS_MEM_STAT);
- (*vec_)->prefix.num = size_;
+ reserve_exact<A> (vec, size - (int)VEC_length (T, *vec)
+ VEC_CHECK_PASS PASS_MEM_STAT);
+ (*vec)->prefix_.num_ = size;
}
-/* Grow to a specific length.
- void VEC_T_A_safe_grow_cleared (VEC(T,A) *&v, int len);
-
- Grow the vector to a specific length. The LEN must be as
+/* Grow the vector *VEC to a specific length. The LEN must be as
long or longer than the current length. The new elements are
initialized to zero. */
-#define VEC_safe_grow_cleared(T,A,V,I) \
- (VEC_safe_grow_cleared_1<T,A> (&(V), (int)(I) \
- VEC_CHECK_INFO MEM_STAT_INFO))
-
-template<typename T, enum vec_allocation_t A>
-static inline void
-VEC_safe_grow_cleared_1 (vec_t<T> **vec_, int size_ VEC_CHECK_DECL
- MEM_STAT_DECL)
+template<typename T>
+template<enum vec_allocation_t A>
+void
+vec_t<T>::safe_grow_cleared (vec_t<T> **vec, int size VEC_CHECK_DECL
+ MEM_STAT_DECL)
{
- int oldsize = VEC_length (T, *vec_);
- VEC_safe_grow_1<T, A> (vec_, size_ VEC_CHECK_PASS PASS_MEM_STAT);
- memset (&(VEC_address (T, *vec_)[oldsize]), 0,
- sizeof (T) * (size_ - oldsize));
+ int oldsize = VEC_length (T, *vec);
+ safe_grow<A> (vec, size VEC_CHECK_PASS PASS_MEM_STAT);
+ memset (&((*vec)->address ()[oldsize]), 0, sizeof (T) * (size - oldsize));
}
-/* Replace element
- T VEC_T_replace (VEC(T) *v, unsigned ix, T val); // Integer
- T VEC_T_replace (VEC(T) *v, unsigned ix, T val); // Pointer
- T *VEC_T_replace (VEC(T) *v, unsigned ix, T *val); // Object
-
- Replace the IXth element of V with a new value, VAL. For pointer
- vectors returns the original value. For object vectors returns a
- pointer to the new value. For object vectors the new value can be
- NULL, in which case no overwriting of the slot is actually
- performed. */
-
-#define VEC_replace(T,V,I,O) \
- (VEC_replace_1<T> (V, (unsigned)(I), O VEC_CHECK_INFO))
+/* Replace the IXth element of this vector with a new value, VAL. */
template<typename T>
-static inline T&
-VEC_replace_1 (vec_t<T> *vec_, unsigned ix_, T obj_ VEC_CHECK_DECL)
+void
+vec_t<T>::replace (unsigned ix, T obj VEC_CHECK_DECL)
{
- VEC_ASSERT (ix_ < vec_->prefix.num, "replace", T, base);
- vec_->vec[ix_] = obj_;
- return vec_->vec[ix_];
+ VEC_ASSERT (ix < prefix_.num_, "replace", T, base);
+ vec_[ix] = obj;
}
-/* Insert object with no reallocation
- void VEC_T_quick_insert (VEC(T) *v, unsigned ix, T val); // Integer
- void VEC_T_quick_insert (VEC(T) *v, unsigned ix, T val); // Pointer
- void VEC_T_quick_insert (VEC(T) *v, unsigned ix, T *val); // Object
+/* Insert an element, OBJ, at the IXth position of VEC. There must be
+ sufficient space. */
- Insert an element, VAL, at the IXth position of V. For vectors of
- object, the new value can be NULL, in which case no initialization
- of the inserted slot takes place. There must be sufficient space. */
+template<typename T>
+void
+vec_t<T>::quick_insert (unsigned ix, T obj VEC_CHECK_DECL)
+{
+ VEC_ASSERT (prefix_.num_ < prefix_.alloc_, "insert", T, base);
+ VEC_ASSERT (ix <= prefix_.num_, "insert", T, base);
+ T *slot = &vec_[ix];
+ memmove (slot + 1, slot, (prefix_.num_++ - ix) * sizeof (T));
+ *slot = obj;
+}
-#define VEC_quick_insert(T,V,I,O) \
- (VEC_quick_insert_1<T> (V,I,O VEC_CHECK_INFO))
+
+/* Insert an element, *PTR, at the IXth position of V. The new value
+ can be NULL, in which case no initialization of the inserted slot
+ takes place. There must be sufficient space. */
template<typename T>
-static inline void
-VEC_quick_insert_1 (vec_t<T> *vec_, unsigned ix_, T obj_ VEC_CHECK_DECL)
+void
+vec_t<T>::quick_insert (unsigned ix, const T *ptr VEC_CHECK_DECL)
{
- T *slot_;
-
- VEC_ASSERT (vec_->prefix.num < vec_->prefix.alloc, "insert", T, base);
- VEC_ASSERT (ix_ <= vec_->prefix.num, "insert", T, base);
- slot_ = &vec_->vec[ix_];
- memmove (slot_ + 1, slot_, (vec_->prefix.num++ - ix_) * sizeof (T));
- *slot_ = obj_;
+ VEC_ASSERT (prefix_.num_ < prefix_.alloc_, "insert", T, base);
+ VEC_ASSERT (ix <= prefix_.num_, "insert", T, base);
+ T *slot = &vec_[ix];
+ memmove (slot + 1, slot, (prefix_.num_++ - ix) * sizeof (T));
+ if (ptr)
+ *slot = *ptr;
}
+
+/* Insert an element, VAL, at the IXth position of VEC. Reallocate
+ VEC, if necessary. */
+
template<typename T>
-static inline void
-VEC_quick_insert_1 (vec_t<T> *vec_, unsigned ix_, const T *ptr_ VEC_CHECK_DECL)
+template<enum vec_allocation_t A>
+void
+vec_t<T>::safe_insert (vec_t<T> **vec, unsigned ix, T obj VEC_CHECK_DECL
+ MEM_STAT_DECL)
{
- T *slot_;
-
- VEC_ASSERT (vec_->prefix.num < vec_->prefix.alloc, "insert", T, base);
- VEC_ASSERT (ix_ <= vec_->prefix.num, "insert", T, base);
- slot_ = &vec_->vec[ix_];
- memmove (slot_ + 1, slot_, (vec_->prefix.num++ - ix_) * sizeof (T));
- if (ptr_)
- *slot_ = *ptr_;
+ reserve<A> (vec, 1 VEC_CHECK_PASS PASS_MEM_STAT);
+ (*vec)->quick_insert (ix, obj VEC_CHECK_PASS);
}
-/* Insert object with reallocation
- T *VEC_T_A_safe_insert (VEC(T,A) *&v, unsigned ix, T val); // Integer
- T *VEC_T_A_safe_insert (VEC(T,A) *&v, unsigned ix, T val); // Pointer
- T *VEC_T_A_safe_insert (VEC(T,A) *&v, unsigned ix, T *val); // Object
-
- Insert an element, VAL, at the IXth position of V. Return a pointer
+/* Insert an element, *PTR, at the IXth position of VEC. Return a pointer
to the slot created. For vectors of object, the new value can be
NULL, in which case no initialization of the inserted slot takes
place. Reallocate V, if necessary. */
-#define VEC_safe_insert(T,A,V,I,O) \
- (VEC_safe_insert_1<T, A> (&(V),I,O VEC_CHECK_INFO MEM_STAT_INFO))
-
-template<typename T, enum vec_allocation_t A>
-static inline void
-VEC_safe_insert_1 (vec_t<T> **vec_, unsigned ix_, T obj_
- VEC_CHECK_DECL MEM_STAT_DECL)
-{
- VEC_reserve_1<T, A> (vec_, 1 VEC_CHECK_PASS PASS_MEM_STAT);
- VEC_quick_insert_1 (*vec_, ix_, obj_ VEC_CHECK_PASS);
-}
-
-template<typename T, enum vec_allocation_t A>
-static inline void
-VEC_safe_insert_1 (vec_t<T> **vec_, unsigned ix_, T *ptr_
- VEC_CHECK_DECL MEM_STAT_DECL)
+template<typename T>
+template<enum vec_allocation_t A>
+void
+vec_t<T>::safe_insert (vec_t<T> **vec, unsigned ix, T *ptr VEC_CHECK_DECL
+ MEM_STAT_DECL)
{
- VEC_reserve_1<T, A> (vec_, 1 VEC_CHECK_PASS PASS_MEM_STAT);
- VEC_quick_insert_1 (*vec_, ix_, ptr_ VEC_CHECK_PASS);
+ reserve<A> (vec, 1 VEC_CHECK_PASS PASS_MEM_STAT);
+ (*vec)->quick_insert (ix, ptr VEC_CHECK_PASS);
}
-
-/* Remove element retaining order
- void VEC_T_ordered_remove (VEC(T) *v, unsigned ix); // Integer
- void VEC_T_ordered_remove (VEC(T) *v, unsigned ix); // Pointer
- void VEC_T_ordered_remove (VEC(T) *v, unsigned ix); // Object
-
- Remove an element from the IXth position of V. Ordering of
+/* Remove an element from the IXth position of this vector. Ordering of
remaining elements is preserved. This is an O(N) operation due to
a memmove. */
-#define VEC_ordered_remove(T,V,I) \
- (VEC_ordered_remove_1<T> (V,I VEC_CHECK_INFO))
-
template<typename T>
-static inline void
-VEC_ordered_remove_1 (vec_t<T> *vec_, unsigned ix_ VEC_CHECK_DECL)
+void
+vec_t<T>::ordered_remove (unsigned ix VEC_CHECK_DECL)
{
- T *slot_;
- VEC_ASSERT (ix_ < vec_->prefix.num, "remove", T, base);
- slot_ = &vec_->vec[ix_];
- memmove (slot_, slot_ + 1, (--vec_->prefix.num - ix_) * sizeof (T));
+ VEC_ASSERT (ix < prefix_.num_, "remove", T, base);
+ T *slot = &vec_[ix];
+ memmove (slot, slot + 1, (--prefix_.num_ - ix) * sizeof (T));
}
-/* Remove element destroying order
- void VEC_T_unordered_remove (VEC(T) *v, unsigned ix); // Integer
- void VEC_T_unordered_remove (VEC(T) *v, unsigned ix); // Pointer
- void VEC_T_unordered_remove (VEC(T) *v, unsigned ix); // Object
-
- Remove an element from the IXth position of V. Ordering of
+/* Remove an element from the IXth position of VEC. Ordering of
remaining elements is destroyed. This is an O(1) operation. */
-#define VEC_unordered_remove(T,V,I) \
- (VEC_unordered_remove_1<T> (V,I VEC_CHECK_INFO))
-
template<typename T>
-static inline void
-VEC_unordered_remove_1 (vec_t<T> *vec_, unsigned ix_ VEC_CHECK_DECL)
+void
+vec_t<T>::unordered_remove (unsigned ix VEC_CHECK_DECL)
{
- VEC_ASSERT (ix_ < vec_->prefix.num, "remove", T, base);
- vec_->vec[ix_] = vec_->vec[--vec_->prefix.num];
+ VEC_ASSERT (ix < prefix_.num_, "remove", T, base);
+ vec_[ix] = vec_[--prefix_.num_];
}
-/* Remove a block of elements
- void VEC_T_block_remove (VEC(T) *v, unsigned ix, unsigned len);
-
- Remove LEN elements starting at the IXth. Ordering is retained.
+/* Remove LEN elements starting at the IXth. Ordering is retained.
This is an O(N) operation due to memmove. */
-#define VEC_block_remove(T,V,I,L) \
- (VEC_block_remove_1<T> (V, I, L VEC_CHECK_INFO))
-
template<typename T>
-static inline void
-VEC_block_remove_1 (vec_t<T> *vec_, unsigned ix_, unsigned len_ VEC_CHECK_DECL)
+void
+vec_t<T>::block_remove (unsigned ix, unsigned len VEC_CHECK_DECL)
{
- T *slot_;
- VEC_ASSERT (ix_ + len_ <= vec_->prefix.num, "block_remove", T, base);
- slot_ = &vec_->vec[ix_];
- vec_->prefix.num -= len_;
- memmove (slot_, slot_ + len_, (vec_->prefix.num - ix_) * sizeof (T));
+ VEC_ASSERT (ix + len <= prefix_.num_, "block_remove", T, base);
+ T *slot = &vec_[ix];
+ prefix_.num_ -= len;
+ memmove (slot, slot + len, (prefix_.num_ - ix) * sizeof (T));
}
+/* Sort the contents of V with qsort. Use CMP as the comparison function. */
+#define VEC_qsort(T,V,CMP) \
+ qsort (VEC_address (T, V), VEC_length (T, V), sizeof (T), CMP)
-/* Conveniently sort the contents of the vector with qsort.
- void VEC_qsort (VEC(T) *v, int (*cmp_func)(const void *, const void *)) */
-
-#define VEC_qsort(T,V,CMP) qsort(VEC_address (T, V), VEC_length (T, V), \
- sizeof (T), CMP)
-
-
-/* Find the first index in the vector not less than the object.
- unsigned VEC_T_lower_bound (VEC(T) *v, const T val,
- bool (*lessthan) (const T, const T)); // Integer
- unsigned VEC_T_lower_bound (VEC(T) *v, const T val,
- bool (*lessthan) (const T, const T)); // Pointer
- unsigned VEC_T_lower_bound (VEC(T) *v, const T *val,
- bool (*lessthan) (const T*, const T*)); // Object
- Find the first position in which VAL could be inserted without
- changing the ordering of V. LESSTHAN is a function that returns
- true if the first argument is strictly less than the second. */
-
-#define VEC_lower_bound(T,V,O,LT) \
- (VEC_lower_bound_1<T> (V, O, LT VEC_CHECK_INFO))
+/* Find and return the first position in which OBJ could be inserted
+ without changing the ordering of this vector. LESSTHAN is a
+ function that returns true if the first argument is strictly less
+ than the second. */
template<typename T>
-static inline unsigned
-VEC_lower_bound_1 (vec_t<T> *vec_, T obj_,
- bool (*lessthan_)(T, T) VEC_CHECK_DECL)
+unsigned
+vec_t<T>::lower_bound (T obj, bool (*lessthan)(T, T)) const
{
- unsigned int len_ = VEC_length (T, vec_);
- unsigned int half_, middle_;
- unsigned int first_ = 0;
- while (len_ > 0)
+ unsigned int len = VEC_length (T, this);
+ unsigned int half, middle;
+ unsigned int first = 0;
+ while (len > 0)
{
- T middle_elem_;
- half_ = len_ >> 1;
- middle_ = first_;
- middle_ += half_;
- middle_elem_ = VEC_index_1 (vec_, middle_ VEC_CHECK_PASS);
- if (lessthan_ (middle_elem_, obj_))
+ half = len >> 1;
+ middle = first;
+ middle += half;
+ T middle_elem = (*this)[middle];
+ if (lessthan (middle_elem, obj))
{
- first_ = middle_;
- ++first_;
- len_ = len_ - half_ - 1;
+ first = middle;
+ ++first;
+ len = len - half - 1;
}
else
- len_ = half_;
+ len = half;
}
- return first_;
+ return first;
}
+
+/* Find and return the first position in which *PTR could be inserted
+ without changing the ordering of this vector. LESSTHAN is a
+ function that returns true if the first argument is strictly less
+ than the second. */
+
template<typename T>
-static inline unsigned
-VEC_lower_bound_1 (vec_t<T> *vec_, const T *ptr_,
- bool (*lessthan_)(const T*, const T*) VEC_CHECK_DECL)
+unsigned
+vec_t<T>::lower_bound (const T *ptr,
+ bool (*lessthan_)(const T *, const T *)) const
{
- unsigned int len_ = VEC_length (T, vec_);
- unsigned int half_, middle_;
- unsigned int first_ = 0;
- while (len_ > 0)
+ unsigned int len = VEC_length (T, this);
+ unsigned int half, middle;
+ unsigned int first = 0;
+ while (len > 0)
{
- T *middle_elem_;
- half_ = len_ >> 1;
- middle_ = first_;
- middle_ += half_;
- middle_elem_ = &VEC_index_1 (vec_, middle_ VEC_CHECK_PASS);
- if (lessthan_ (middle_elem_, ptr_))
+ half = len >> 1;
+ middle = first;
+ middle += half;
+ const T *middle_elem = &(*this)[middle];
+ if (lessthan (middle_elem, ptr))
{
- first_ = middle_;
- ++first_;
- len_ = len_ - half_ - 1;
+ first = middle;
+ ++first;
+ len = len - half - 1;
}
else
- len_ = half_;
+ len = half;
}
- return first_;
+ return first;
}
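
A short sketch of how the lower_bound overloads above can keep a vector sorted on insertion; the comparison function int_less and the non-NULL, already-sorted vector v are hypothetical.

    static bool
    int_less (int a, int b)
    {
      return a < b;
    }

    unsigned pos = VEC_lower_bound (int, v, 7, int_less);
    VEC_safe_insert (int, heap, v, pos, 7);  /* v stays sorted */
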
@@ -1084,62 +1107,62 @@ void *vec_gc_o_reserve_1 (void *, int, size_t, size_t, bool MEM_STAT_DECL);
exponentially. As a special case, if VEC_ is NULL, and RESERVE is
0, no vector will be created. */
-template<typename T, enum vec_allocation_t A>
+template<typename T>
+template<enum vec_allocation_t A>
vec_t<T> *
-vec_reserve (vec_t<T> *vec_, int reserve MEM_STAT_DECL)
+vec_t<T>::reserve (vec_t<T> *vec, int reserve MEM_STAT_DECL)
{
- if (A == gc)
- return (vec_t<T> *) vec_gc_o_reserve_1 (vec_, reserve,
- offsetof (vec_t<T>, vec),
- sizeof (T), false
- PASS_MEM_STAT);
- else if (A == heap)
- return (vec_t<T> *) vec_heap_o_reserve_1 (vec_, reserve,
- offsetof (vec_t<T>, vec),
- sizeof (T), false
- PASS_MEM_STAT);
- else
- return (vec_t<T> *) vec_stack_o_reserve (vec_, reserve,
- offsetof (vec_t<T>, vec),
- sizeof (T) PASS_MEM_STAT);
+ void *res = NULL;
+ size_t off = offsetof (vec_t<T>, vec_);
+ size_t sz = sizeof (T);
+
+ switch (A)
+ {
+ case gc:
+ res = vec_gc_o_reserve_1 (vec, reserve, off, sz, false PASS_MEM_STAT);
+ break;
+ case heap:
+ res = vec_heap_o_reserve_1 (vec, reserve, off, sz, false PASS_MEM_STAT);
+ break;
+ case stack:
+ res = vec_stack_o_reserve (vec, reserve, off, sz PASS_MEM_STAT);
+ break;
+ }
+
+ return static_cast <vec_t<T> *> (res);
}
-/* Ensure there are at least RESERVE free slots in VEC_, growing
+/* Ensure there are at least RESERVE free slots in VEC, growing
exactly. If RESERVE < 0 grow exactly, else grow exponentially. As
- a special case, if VEC_ is NULL, and RESERVE is 0, no vector will be
+ a special case, if VEC is NULL, and RESERVE is 0, no vector will be
created. */
-template<typename T, enum vec_allocation_t A>
+template<typename T>
+template<enum vec_allocation_t A>
vec_t<T> *
-vec_reserve_exact (vec_t<T> *vec_, int reserve MEM_STAT_DECL)
+vec_t<T>::reserve_exact (vec_t<T> *vec, int reserve MEM_STAT_DECL)
{
- if (A == gc)
- return (vec_t<T> *) vec_gc_o_reserve_1 (vec_, reserve,
- sizeof (struct vec_prefix),
- sizeof (T), true
- PASS_MEM_STAT);
- else if (A == heap)
- return (vec_t<T> *) vec_heap_o_reserve_1 (vec_, reserve,
- sizeof (struct vec_prefix),
- sizeof (T), true
- PASS_MEM_STAT);
- else if (A == stack)
+ void *res = NULL;
+ size_t off = sizeof (struct vec_prefix);
+ size_t sz = sizeof (T);
+
+ gcc_assert (offsetof (vec_t<T>, vec_) == sizeof (struct vec_prefix));
+
+ switch (A)
{
- /* Only allow stack vectors when re-growing them. The initial
- allocation of stack vectors must be done with VEC_alloc,
- because it uses alloca() for the allocation. */
- if (vec_ == NULL)
- {
- fprintf (stderr, "Stack vectors must be initially allocated "
- "with VEC_stack_alloc.\n");
- gcc_unreachable ();
- }
- return (vec_t<T> *) vec_stack_o_reserve_exact (vec_, reserve,
- sizeof (struct vec_prefix),
- sizeof (T)
- PASS_MEM_STAT);
+ case gc:
+ res = vec_gc_o_reserve_1 (vec, reserve, off, sz, true PASS_MEM_STAT);
+ break;
+ case heap:
+ res = vec_heap_o_reserve_1 (vec, reserve, off, sz, true PASS_MEM_STAT);
+ break;
+ case stack:
+ res = vec_stack_o_reserve_exact (vec, reserve, off, sz PASS_MEM_STAT);
+ break;
}
+
+ return static_cast <vec_t<T> *> (res);
}
#endif /* GCC_VEC_H */
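
The two lower_bound overloads converted above perform the usual binary search for the first position whose element is not less than the probe. The following standalone sketch mirrors that loop on a std::vector so it can be compiled and checked outside of GCC; it is an illustration only, not part of the patch, and the helper names are hypothetical.

// Standalone sketch of the search loop used by vec_t<T>::lower_bound above.
// std::vector stands in for vec_t so this compiles outside of GCC; LESSTHAN
// must be a strict weak order, as the comment in vec.h requires.
#include <vector>
#include <cassert>

template<typename T>
static unsigned
lower_bound_sketch (const std::vector<T> &v, T obj, bool (*lessthan)(T, T))
{
  unsigned len = (unsigned) v.size ();
  unsigned first = 0;
  while (len > 0)
    {
      unsigned half = len >> 1;
      unsigned middle = first + half;
      if (lessthan (v[middle], obj))
        {
          /* Probe element is smaller: keep searching above it.  */
          first = middle + 1;
          len = len - half - 1;
        }
      else
        len = half;
    }
  return first;
}

static bool int_less (int a, int b) { return a < b; }

int
main ()
{
  std::vector<int> v = { 1, 3, 3, 7 };
  assert (lower_bound_sketch (v, 3, int_less) == 1);  /* first slot where 3 fits */
  assert (lower_bound_sketch (v, 8, int_less) == 4);  /* past the end */
  return 0;
}
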
diff --git a/libgcc/ChangeLog b/libgcc/ChangeLog
index 463b87289c6..ed5b2af2e22 100644
--- a/libgcc/ChangeLog
+++ b/libgcc/ChangeLog
@@ -1,3 +1,11 @@
+2012-09-04 Teresa Johnson <tejohnson@google.com>
+
+ * libgcov.c (struct gcov_summary_buffer): New structure.
+ (gcov_histogram_insert): New function.
+ (gcov_compute_histogram): Ditto.
+ (gcov_exit): Invoke gcov_compute_histogram, and perform merging of
+ histograms during summary merging.
+
2012-09-01 Mark Kettenis <kettenis@openbsd.org>
* config.host (x86_64-*-openbsd*): New target.
diff --git a/libgcc/libgcov.c b/libgcc/libgcov.c
index a22e8f4c3ee..fce8587affe 100644
--- a/libgcc/libgcov.c
+++ b/libgcc/libgcov.c
@@ -97,6 +97,12 @@ struct gcov_fn_buffer
/* note gcov_fn_info ends in a trailing array. */
};
+struct gcov_summary_buffer
+{
+ struct gcov_summary_buffer *next;
+ struct gcov_summary summary;
+};
+
/* Chain of per-object gcov structures. */
static struct gcov_info *gcov_list;
@@ -276,6 +282,76 @@ gcov_version (struct gcov_info *ptr, gcov_unsigned_t version,
return 1;
}
+/* Insert counter VALUE into HISTOGRAM. */
+
+static void
+gcov_histogram_insert(gcov_bucket_type *histogram, gcov_type value)
+{
+ unsigned i;
+
+ i = gcov_histo_index(value);
+ histogram[i].num_counters++;
+ histogram[i].cum_value += value;
+ if (value < histogram[i].min_value)
+ histogram[i].min_value = value;
+}
+
+/* Computes a histogram of the arc counters to place in the summary SUM. */
+
+static void
+gcov_compute_histogram (struct gcov_summary *sum)
+{
+ struct gcov_info *gi_ptr;
+ const struct gcov_fn_info *gfi_ptr;
+ const struct gcov_ctr_info *ci_ptr;
+ struct gcov_ctr_summary *cs_ptr;
+ unsigned t_ix, f_ix, ctr_info_ix, ix;
+ int h_ix;
+
+ /* This currently only applies to arc counters. */
+ t_ix = GCOV_COUNTER_ARCS;
+
+ /* First check if there are any counts recorded for this counter. */
+ cs_ptr = &(sum->ctrs[t_ix]);
+ if (!cs_ptr->num)
+ return;
+
+ for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
+ {
+ cs_ptr->histogram[h_ix].num_counters = 0;
+ cs_ptr->histogram[h_ix].min_value = cs_ptr->run_max;
+ cs_ptr->histogram[h_ix].cum_value = 0;
+ }
+
+ /* Walk through all the per-object structures and record each of
+ the count values in histogram. */
+ for (gi_ptr = gcov_list; gi_ptr; gi_ptr = gi_ptr->next)
+ {
+ if (!gi_ptr->merge[t_ix])
+ continue;
+
+ /* Find the appropriate index into the gcov_ctr_info array
+ for the counter we are currently working on based on the
+ existence of the merge function pointer for this object. */
+ for (ix = 0, ctr_info_ix = 0; ix < t_ix; ix++)
+ {
+ if (gi_ptr->merge[ix])
+ ctr_info_ix++;
+ }
+ for (f_ix = 0; f_ix != gi_ptr->n_functions; f_ix++)
+ {
+ gfi_ptr = gi_ptr->functions[f_ix];
+
+ if (!gfi_ptr || gfi_ptr->key != gi_ptr)
+ continue;
+
+ ci_ptr = &gfi_ptr->ctrs[ctr_info_ix];
+ for (ix = 0; ix < ci_ptr->num; ix++)
+ gcov_histogram_insert (cs_ptr->histogram, ci_ptr->values[ix]);
+ }
+ }
+}
+
/* Dump the coverage counts. We merge with existing counts when
possible, to avoid growing the .da files ad infinitum. We use this
program's checksum to make sure we only accumulate whole program
@@ -347,6 +423,7 @@ gcov_exit (void)
}
}
}
+ gcov_compute_histogram (&this_prg);
{
/* Check if the level of dirs to strip off specified. */
@@ -400,6 +477,8 @@ gcov_exit (void)
const char *fname, *s;
struct gcov_fn_buffer *fn_buffer = 0;
struct gcov_fn_buffer **fn_tail = &fn_buffer;
+ struct gcov_summary_buffer *next_sum_buffer, *sum_buffer = 0;
+ struct gcov_summary_buffer **sum_tail = &sum_buffer;
fname = gi_ptr->filename;
@@ -482,17 +561,29 @@ gcov_exit (void)
f_ix--;
length = gcov_read_unsigned ();
- if (length != GCOV_TAG_SUMMARY_LENGTH)
- goto read_mismatch;
gcov_read_summary (&tmp);
if ((error = gcov_is_error ()))
goto read_error;
- if (summary_pos || tmp.checksum != crc32)
- goto next_summary;
+ if (summary_pos)
+ {
+ /* Save all summaries after the one that will be
+ merged into below. These will need to be rewritten
+ as histogram merging may change the number of non-zero
+ histogram entries that will be emitted, and thus the
+ size of the merged summary. */
+ (*sum_tail) = (struct gcov_summary_buffer *)
+ malloc (sizeof(struct gcov_summary_buffer));
+ (*sum_tail)->summary = tmp;
+ (*sum_tail)->next = 0;
+ sum_tail = &((*sum_tail)->next);
+ goto next_summary;
+ }
+ if (tmp.checksum != crc32)
+ goto next_summary;
for (t_ix = 0; t_ix != GCOV_COUNTERS_SUMMABLE; t_ix++)
if (tmp.ctrs[t_ix].num != this_prg.ctrs[t_ix].num)
- goto next_summary;
+ goto next_summary;
prg = tmp;
summary_pos = eof_pos;
@@ -598,11 +689,16 @@ gcov_exit (void)
if (gi_ptr->merge[t_ix])
{
if (!cs_prg->runs++)
- cs_prg->num = cs_tprg->num;
+ cs_prg->num = cs_tprg->num;
cs_prg->sum_all += cs_tprg->sum_all;
if (cs_prg->run_max < cs_tprg->run_max)
cs_prg->run_max = cs_tprg->run_max;
cs_prg->sum_max += cs_tprg->run_max;
+ if (cs_prg->runs == 1)
+ memcpy (cs_prg->histogram, cs_tprg->histogram,
+ sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
+ else
+ gcov_histogram_merge (cs_prg->histogram, cs_tprg->histogram);
}
else if (cs_prg->runs)
goto read_mismatch;
@@ -635,8 +731,18 @@ gcov_exit (void)
/* Generate whole program statistics. */
gcov_write_summary (GCOV_TAG_PROGRAM_SUMMARY, &prg);
- if (summary_pos < eof_pos)
- gcov_seek (eof_pos);
+ /* Rewrite all the summaries that were after the summary we merged
+ into. This is necessary as the merged summary may have a different
+ size due to the number of non-zero histogram entries changing after
+ merging. */
+
+ while (sum_buffer)
+ {
+ gcov_write_summary (GCOV_TAG_PROGRAM_SUMMARY, &sum_buffer->summary);
+ next_sum_buffer = sum_buffer->next;
+ free (sum_buffer);
+ sum_buffer = next_sum_buffer;
+ }
/* Write execution counts for each function. */
for (f_ix = 0; (unsigned)f_ix != gi_ptr->n_functions; f_ix++)
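
gcov_histogram_insert above buckets each arc counter value and keeps a per-bucket count, cumulative value, and minimum; gcov_compute_histogram walks every per-object counter array and feeds each value through that routine, and gcov_exit later merges the histograms across runs. The following standalone sketch shows the same bookkeeping with a hypothetical log2-based bucket index; the real gcov_histo_index() and GCOV_HISTOGRAM_SIZE live in gcov-io.h and differ in detail, so this is an illustration only.

#include <assert.h>

#define SKETCH_HISTOGRAM_SIZE 64   /* hypothetical; the real GCOV_HISTOGRAM_SIZE differs */

struct sketch_bucket
{
  unsigned num_counters;
  long long min_value;
  long long cum_value;
};

/* Hypothetical stand-in for gcov_histo_index(): bucket by floor(log2(value)).  */
static unsigned
sketch_histo_index (long long value)
{
  unsigned i = 0;
  while (value > 1 && i < SKETCH_HISTOGRAM_SIZE - 1)
    {
      value >>= 1;
      i++;
    }
  return i;
}

/* Mirrors the bookkeeping of gcov_histogram_insert() in the patch above.  */
static void
sketch_histogram_insert (struct sketch_bucket *histogram, long long value)
{
  unsigned i = sketch_histo_index (value);
  histogram[i].num_counters++;
  histogram[i].cum_value += value;
  if (value < histogram[i].min_value)
    histogram[i].min_value = value;
}

int
main (void)
{
  struct sketch_bucket h[SKETCH_HISTOGRAM_SIZE];
  for (unsigned i = 0; i < SKETCH_HISTOGRAM_SIZE; i++)
    {
      h[i].num_counters = 0;
      h[i].cum_value = 0;
      h[i].min_value = 1000000;   /* run_max plays this role in the real code */
    }
  sketch_histogram_insert (h, 5);
  sketch_histogram_insert (h, 6);
  /* Both values land in the floor(log2) == 2 bucket.  */
  assert (h[2].num_counters == 2 && h[2].cum_value == 11 && h[2].min_value == 5);
  return 0;
}
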
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index f6539889ee2..15d84858fd1 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,49 @@
+2012-09-05 Ulrich Drepper <drepper@gmail.com>
+
+ * include/ext/random: Add __gnu_cxx::normal_mv_distribution<> class.
+ * include/ext/random.tcc: Add out-of-line functions for
+ __gnu_cxx::normal_mv_distribution<>.
+ * testsuite/26_numerics/random/normal_mv_distribution/
+ operators/equal.cc: New file.
+ * testsuite/26_numerics/random/normal_mv_distribution/
+ operators/serialize.cc: New file.
+ * testsuite/26_numerics/random/normal_mv_distribution/
+ operators/inequal.cc: New file.
+ * testsuite/26_numerics/random/normal_mv_distribution/
+ cons/default.cc: New file.
+ * testsuite/26_numerics/random/normal_mv_distribution/
+ cons/parms.cc: New file.
+ * testsuite/26_numerics/random/normal_mv_distribution/
+ requirements/explicit_instantiation/1.cc: New file.
+ * testsuite/26_numerics/random/normal_mv_distribution/
+ requirements/typedefs.cc: New file.
+
+2012-09-04 Ulrich Drepper <drepper@gmail.com>
+
+ * include/ext/random: Add __gnu_cxx::beta_distribution<> class.
+ * include/ext/random.tcc: Add out-of-line functions for
+ __gnu_cxx::beta_distribution<>.
+ * testsuite/26_numerics/random/beta_distribution/
+ operators/equal.cc: New file.
+ * testsuite/26_numerics/random/beta_distribution/
+ operators/serialize.cc: New file.
+ * testsuite/26_numerics/random/beta_distribution/
+ operators/inequal.cc: New file.
+ * testsuite/26_numerics/random/beta_distribution/
+ cons/parms.cc: New file.
+ * testsuite/26_numerics/random/beta_distribution/
+ cons/default.cc: New file.
+ * testsuite/26_numerics/random/beta_distribution/
+ requirements/typedefs.cc: New file.
+ * testsuite/26_numerics/random/beta_distribution/
+ requirements/explicit_instantiation/1.cc: New file.
+
+2012-09-04 Steven Bosscher <steven@gcc.gnu.org>
+
+ PR bootstrap/54453
+ * include/Makefile.am: Fix regex.
+ * include/Makefile.in: Regenerate.
+
2012-08-30 Benjamin Kosnik <bkoz@redhat.com>
PR libstdc++/54005 continued
diff --git a/libstdc++-v3/include/Makefile.am b/libstdc++-v3/include/Makefile.am
index d3d6f4af858..bce14ca6736 100644
--- a/libstdc++-v3/include/Makefile.am
+++ b/libstdc++-v3/include/Makefile.am
@@ -1101,7 +1101,7 @@ ${host_builddir}/c++config.h: ${CONFIG_HEADER} \
visibility=`cat stamp-visibility` ;\
externtemplate=`cat stamp-extern-template` ;\
ldbl_compat='s,g,g,' ;\
- grep "^[ ]*#[ ]*define[ ][ ]*_GLIBCXX_LONG_DOUBLE_COMPAT[ ][ ]*1[ ]*$$" \
+ grep "^[ ]*#[ ]*define[ ][ ]*_GLIBCXX_LONG_DOUBLE_COMPAT[ ][ ]*1[ ]*$$" \
${CONFIG_HEADER} > /dev/null 2>&1 \
&& ldbl_compat='s,^#undef _GLIBCXX_LONG_DOUBLE_COMPAT$$,#define _GLIBCXX_LONG_DOUBLE_COMPAT 1,' ;\
sed -e "s,define __GLIBCXX__,define __GLIBCXX__ $$date," \
@@ -1115,7 +1115,7 @@ ${host_builddir}/c++config.h: ${CONFIG_HEADER} \
-e 's/VERSION/_GLIBCXX_VERSION/g' \
-e 's/WORDS_/_GLIBCXX_WORDS_/g' \
-e 's/ICONV_CONST/_GLIBCXX_ICONV_CONST/g' \
- -e '/[ ]_GLIBCXX_LONG_DOUBLE_COMPAT[ ]/d' \
+ -e '/[ ]_GLIBCXX_LONG_DOUBLE_COMPAT[ ]/d' \
< ${CONFIG_HEADER} >> $@ ;\
echo "" >> $@ ;\
echo "#endif // _GLIBCXX_CXX_CONFIG_H" >> $@
diff --git a/libstdc++-v3/include/Makefile.in b/libstdc++-v3/include/Makefile.in
index dc5bf0c33d9..c5537295ff9 100644
--- a/libstdc++-v3/include/Makefile.in
+++ b/libstdc++-v3/include/Makefile.in
@@ -1521,7 +1521,7 @@ ${host_builddir}/c++config.h: ${CONFIG_HEADER} \
visibility=`cat stamp-visibility` ;\
externtemplate=`cat stamp-extern-template` ;\
ldbl_compat='s,g,g,' ;\
- grep "^[ ]*#[ ]*define[ ][ ]*_GLIBCXX_LONG_DOUBLE_COMPAT[ ][ ]*1[ ]*$$" \
+ grep "^[ ]*#[ ]*define[ ][ ]*_GLIBCXX_LONG_DOUBLE_COMPAT[ ][ ]*1[ ]*$$" \
${CONFIG_HEADER} > /dev/null 2>&1 \
&& ldbl_compat='s,^#undef _GLIBCXX_LONG_DOUBLE_COMPAT$$,#define _GLIBCXX_LONG_DOUBLE_COMPAT 1,' ;\
sed -e "s,define __GLIBCXX__,define __GLIBCXX__ $$date," \
@@ -1535,7 +1535,7 @@ ${host_builddir}/c++config.h: ${CONFIG_HEADER} \
-e 's/VERSION/_GLIBCXX_VERSION/g' \
-e 's/WORDS_/_GLIBCXX_WORDS_/g' \
-e 's/ICONV_CONST/_GLIBCXX_ICONV_CONST/g' \
- -e '/[ ]_GLIBCXX_LONG_DOUBLE_COMPAT[ ]/d' \
+ -e '/[ ]_GLIBCXX_LONG_DOUBLE_COMPAT[ ]/d' \
< ${CONFIG_HEADER} >> $@ ;\
echo "" >> $@ ;\
echo "#endif // _GLIBCXX_CXX_CONFIG_H" >> $@
diff --git a/libstdc++-v3/include/ext/random b/libstdc++-v3/include/ext/random
index 05cbc8fa493..6bb438a8558 100644
--- a/libstdc++-v3/include/ext/random
+++ b/libstdc++-v3/include/ext/random
@@ -32,6 +32,7 @@
#pragma GCC system_header
#include <random>
+#include <array>
#ifdef __SSE2__
# include <x86intrin.h>
#endif
@@ -374,6 +375,527 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
0x3bd2b64bU, 0x0c64b1e4U>
sfmt216091_64;
+
+ /**
+ * @brief A beta continuous distribution for random numbers.
+ *
+ * The formula for the beta probability density function is:
+ * @f[
+ * p(x|\alpha,\beta) = \frac{1}{B(\alpha,\beta)}
+ * x^{\alpha - 1} (1 - x)^{\beta - 1}
+ * @f]
+ */
+ template<typename _RealType = double>
+ class beta_distribution
+ {
+ static_assert(std::is_floating_point<_RealType>::value,
+ "template argument not a floating point type");
+
+ public:
+ /** The type of the range of the distribution. */
+ typedef _RealType result_type;
+ /** Parameter type. */
+ struct param_type
+ {
+ typedef beta_distribution<_RealType> distribution_type;
+ friend class beta_distribution<_RealType>;
+
+ explicit
+ param_type(_RealType __alpha_val = _RealType(1),
+ _RealType __beta_val = _RealType(1))
+ : _M_alpha(__alpha_val), _M_beta(__beta_val)
+ {
+ _GLIBCXX_DEBUG_ASSERT(_M_alpha > _RealType(0));
+ _GLIBCXX_DEBUG_ASSERT(_M_beta > _RealType(0));
+ }
+
+ _RealType
+ alpha() const
+ { return _M_alpha; }
+
+ _RealType
+ beta() const
+ { return _M_beta; }
+
+ friend bool
+ operator==(const param_type& __p1, const param_type& __p2)
+ { return (__p1._M_alpha == __p2._M_alpha
+ && __p1._M_beta == __p2._M_beta); }
+
+ private:
+ void
+ _M_initialize();
+
+ _RealType _M_alpha;
+ _RealType _M_beta;
+ };
+
+ public:
+ /**
+ * @brief Constructs a beta distribution with parameters
+ * @f$\alpha@f$ and @f$\beta@f$.
+ */
+ explicit
+ beta_distribution(_RealType __alpha_val = _RealType(1),
+ _RealType __beta_val = _RealType(1))
+ : _M_param(__alpha_val, __beta_val)
+ { }
+
+ explicit
+ beta_distribution(const param_type& __p)
+ : _M_param(__p)
+ { }
+
+ /**
+ * @brief Resets the distribution state.
+ */
+ void
+ reset()
+ { }
+
+ /**
+ * @brief Returns the @f$\alpha@f$ of the distribution.
+ */
+ _RealType
+ alpha() const
+ { return _M_param.alpha(); }
+
+ /**
+ * @brief Returns the @f$\beta@f$ of the distribution.
+ */
+ _RealType
+ beta() const
+ { return _M_param.beta(); }
+
+ /**
+ * @brief Returns the parameter set of the distribution.
+ */
+ param_type
+ param() const
+ { return _M_param; }
+
+ /**
+ * @brief Sets the parameter set of the distribution.
+ * @param __param The new parameter set of the distribution.
+ */
+ void
+ param(const param_type& __param)
+ { _M_param = __param; }
+
+ /**
+ * @brief Returns the greatest lower bound value of the distribution.
+ */
+ result_type
+ min() const
+ { return result_type(0); }
+
+ /**
+ * @brief Returns the least upper bound value of the distribution.
+ */
+ result_type
+ max() const
+ { return result_type(1); }
+
+ /**
+ * @brief Generating functions.
+ */
+ template<typename _UniformRandomNumberGenerator>
+ result_type
+ operator()(_UniformRandomNumberGenerator& __urng)
+ { return this->operator()(__urng, this->param()); }
+
+ template<typename _UniformRandomNumberGenerator>
+ result_type
+ operator()(_UniformRandomNumberGenerator& __urng,
+ const param_type& __p);
+
+ template<typename _ForwardIterator,
+ typename _UniformRandomNumberGenerator>
+ void
+ __generate(_ForwardIterator __f, _ForwardIterator __t,
+ _UniformRandomNumberGenerator& __urng)
+ { this->__generate(__f, __t, __urng, this->param()); }
+
+ template<typename _ForwardIterator,
+ typename _UniformRandomNumberGenerator>
+ void
+ __generate(_ForwardIterator __f, _ForwardIterator __t,
+ _UniformRandomNumberGenerator& __urng,
+ const param_type& __p)
+ { this->__generate_impl(__f, __t, __urng, __p); }
+
+ template<typename _UniformRandomNumberGenerator>
+ void
+ __generate(result_type* __f, result_type* __t,
+ _UniformRandomNumberGenerator& __urng,
+ const param_type& __p)
+ { this->__generate_impl(__f, __t, __urng, __p); }
+
+ /**
+ * @brief Return true if two beta distributions have the same
+ * parameters and the sequences that would be generated
+ * are equal.
+ */
+ friend bool
+ operator==(const beta_distribution& __d1,
+ const beta_distribution& __d2)
+ { return __d1.param() == __d2.param(); }
+
+ /**
+ * @brief Inserts a %beta_distribution random number distribution
+ * @p __x into the output stream @p __os.
+ *
+ * @param __os An output stream.
+ * @param __x A %beta_distribution random number distribution.
+ *
+ * @returns The output stream with the state of @p __x inserted or in
+ * an error state.
+ */
+ template<typename _RealType1, typename _CharT, typename _Traits>
+ friend std::basic_ostream<_CharT, _Traits>&
+ operator<<(std::basic_ostream<_CharT, _Traits>& __os,
+ const __gnu_cxx::beta_distribution<_RealType1>& __x);
+
+ /**
+ * @brief Extracts a %beta_distribution random number distribution
+ * @p __x from the input stream @p __is.
+ *
+ * @param __is An input stream.
+ * @param __x A %beta_distribution random number generator engine.
+ *
+ * @returns The input stream with @p __x extracted or in an error state.
+ */
+ template<typename _RealType1, typename _CharT, typename _Traits>
+ friend std::basic_istream<_CharT, _Traits>&
+ operator>>(std::basic_istream<_CharT, _Traits>& __is,
+ __gnu_cxx::beta_distribution<_RealType1>& __x);
+
+ private:
+ template<typename _ForwardIterator,
+ typename _UniformRandomNumberGenerator>
+ void
+ __generate_impl(_ForwardIterator __f, _ForwardIterator __t,
+ _UniformRandomNumberGenerator& __urng,
+ const param_type& __p);
+
+ param_type _M_param;
+ };
+
+ /**
+ * @brief Return true if two beta distributions are different.
+ */
+ template<typename _RealType>
+ inline bool
+ operator!=(const __gnu_cxx::beta_distribution<_RealType>& __d1,
+ const __gnu_cxx::beta_distribution<_RealType>& __d2)
+ { return !(__d1 == __d2); }
+
+
+ /**
+ * @brief A multi-variate normal continuous distribution for random numbers.
+ *
+ * The formula for the normal probability density function is
+ * @f[
+ * p(\overrightarrow{x}|\overrightarrow{\mu },\Sigma) =
+ * \frac{1}{\sqrt{(2\pi )^k\det(\Sigma))}}
+ * e^{-\frac{1}{2}(\overrightarrow{x}-\overrightarrow{\mu})^\text{T}
+ * \Sigma ^{-1}(\overrightarrow{x}-\overrightarrow{\mu})}
+ * @f]
+ *
+ * where @f$\overrightarrow{x}@f$ and @f$\overrightarrow{\mu}@f$ are
+ * vectors of dimension @f$k@f$ and @f$\Sigma@f$ is the covariance
+ * matrix (which must be positive-definite).
+ */
+ template<std::size_t _Dimen, typename _RealType = double>
+ class normal_mv_distribution
+ {
+ static_assert(std::is_floating_point<_RealType>::value,
+ "template argument not a floating point type");
+ static_assert(_Dimen != 0, "dimension is zero");
+
+ public:
+ /** The type of the range of the distribution. */
+ typedef std::array<_RealType, _Dimen> result_type;
+ /** Parameter type. */
+ class param_type
+ {
+ static constexpr size_t _M_t_size = _Dimen * (_Dimen + 1) / 2;
+
+ public:
+ typedef normal_mv_distribution<_Dimen, _RealType> distribution_type;
+ friend class normal_mv_distribution<_Dimen, _RealType>;
+
+ param_type()
+ {
+ std::fill(_M_mean.begin(), _M_mean.end(), _RealType(0));
+ auto __it = _M_t.begin();
+ for (size_t __i = 0; __i < _Dimen; ++__i)
+ {
+ std::fill_n(__it, __i, _RealType(0));
+ __it += __i;
+ *__it++ = _RealType(1);
+ }
+ }
+
+ template<typename _ForwardIterator1, typename _ForwardIterator2>
+ param_type(_ForwardIterator1 __meanbegin,
+ _ForwardIterator1 __meanend,
+ _ForwardIterator2 __varcovbegin,
+ _ForwardIterator2 __varcovend)
+ {
+ __glibcxx_function_requires(_ForwardIteratorConcept<
+ _ForwardIterator1>)
+ __glibcxx_function_requires(_ForwardIteratorConcept<
+ _ForwardIterator2>)
+ _GLIBCXX_DEBUG_ASSERT(std::distance(__meanbegin, __meanend)
+ <= _Dimen);
+ const auto __dist = std::distance(__varcovbegin, __varcovend);
+ _GLIBCXX_DEBUG_ASSERT(__dist == _Dimen * _Dimen
+ || __dist == _Dimen * (_Dimen + 1) / 2
+ || __dist == _Dimen);
+
+ if (__dist == _Dimen * _Dimen)
+ _M_init_full(__meanbegin, __meanend, __varcovbegin, __varcovend);
+ else if (__dist == _Dimen * (_Dimen + 1) / 2)
+ _M_init_lower(__meanbegin, __meanend, __varcovbegin, __varcovend);
+ else
+ _M_init_diagonal(__meanbegin, __meanend,
+ __varcovbegin, __varcovend);
+ }
+
+ param_type(std::initializer_list<_RealType> __mean,
+ std::initializer_list<_RealType> __varcov)
+ {
+ _GLIBCXX_DEBUG_ASSERT(__mean.size() <= _Dimen);
+ _GLIBCXX_DEBUG_ASSERT(__varcov.size() == _Dimen * _Dimen
+ || __varcov.size() == _Dimen * (_Dimen + 1) / 2
+ || __varcov.size() == _Dimen);
+
+ if (__varcov.size() == _Dimen * _Dimen)
+ _M_init_full(__mean.begin(), __mean.end(),
+ __varcov.begin(), __varcov.end());
+ else if (__varcov.size() == _Dimen * (_Dimen + 1) / 2)
+ _M_init_lower(__mean.begin(), __mean.end(),
+ __varcov.begin(), __varcov.end());
+ else
+ _M_init_diagonal(__mean.begin(), __mean.end(),
+ __varcov.begin(), __varcov.end());
+ }
+
+ std::array<_RealType, _Dimen>
+ mean() const
+ { return _M_mean; }
+
+ std::array<_RealType, _M_t_size>
+ varcov() const
+ { return _M_t; }
+
+ friend bool
+ operator==(const param_type& __p1, const param_type& __p2)
+ { return __p1._M_mean == __p2._M_mean && __p1._M_t == __p2._M_t; }
+
+ private:
+ template <typename _InputIterator1, typename _InputIterator2>
+ void _M_init_full(_InputIterator1 __meanbegin,
+ _InputIterator1 __meanend,
+ _InputIterator2 __varcovbegin,
+ _InputIterator2 __varcovend);
+ template <typename _InputIterator1, typename _InputIterator2>
+ void _M_init_lower(_InputIterator1 __meanbegin,
+ _InputIterator1 __meanend,
+ _InputIterator2 __varcovbegin,
+ _InputIterator2 __varcovend);
+ template <typename _InputIterator1, typename _InputIterator2>
+ void _M_init_diagonal(_InputIterator1 __meanbegin,
+ _InputIterator1 __meanend,
+ _InputIterator2 __varbegin,
+ _InputIterator2 __varend);
+
+ std::array<_RealType, _Dimen> _M_mean;
+ std::array<_RealType, _M_t_size> _M_t;
+ };
+
+ public:
+ normal_mv_distribution()
+ : _M_param(), _M_nd()
+ { }
+
+ template<typename _ForwardIterator1, typename _ForwardIterator2>
+ normal_mv_distribution(_ForwardIterator1 __meanbegin,
+ _ForwardIterator1 __meanend,
+ _ForwardIterator2 __varcovbegin,
+ _ForwardIterator2 __varcovend)
+ : _M_param(__meanbegin, __meanend, __varcovbegin, __varcovend),
+ _M_nd()
+ { }
+
+ normal_mv_distribution(std::initializer_list<_RealType> __mean,
+ std::initializer_list<_RealType> __varcov)
+ : _M_param(__mean, __varcov), _M_nd()
+ { }
+
+ explicit
+ normal_mv_distribution(const param_type& __p)
+ : _M_param(__p), _M_nd()
+ { }
+
+ /**
+ * @brief Resets the distribution state.
+ */
+ void
+ reset()
+ { _M_nd.reset(); }
+
+ /**
+ * @brief Returns the mean of the distribution.
+ */
+ result_type
+ mean() const
+ { return _M_param.mean(); }
+
+ /**
+ * @brief Returns the compact form of the variance/covariance
+ * matrix of the distribution.
+ */
+ std::array<_RealType, _Dimen * (_Dimen + 1) / 2>
+ varcov() const
+ { return _M_param.varcov(); }
+
+ /**
+ * @brief Returns the parameter set of the distribution.
+ */
+ param_type
+ param() const
+ { return _M_param; }
+
+ /**
+ * @brief Sets the parameter set of the distribution.
+ * @param __param The new parameter set of the distribution.
+ */
+ void
+ param(const param_type& __param)
+ { _M_param = __param; }
+
+ /**
+ * @brief Returns the greatest lower bound value of the distribution.
+ */
+ result_type
+ min() const
+ { result_type __res;
+ __res.fill(std::numeric_limits<_RealType>::min());
+ return __res; }
+
+ /**
+ * @brief Returns the least upper bound value of the distribution.
+ */
+ result_type
+ max() const
+ { result_type __res;
+ __res.fill(std::numeric_limits<_RealType>::max());
+ return __res; }
+
+ /**
+ * @brief Generating functions.
+ */
+ template<typename _UniformRandomNumberGenerator>
+ result_type
+ operator()(_UniformRandomNumberGenerator& __urng)
+ { return this->operator()(__urng, this->param()); }
+
+ template<typename _UniformRandomNumberGenerator>
+ result_type
+ operator()(_UniformRandomNumberGenerator& __urng,
+ const param_type& __p);
+
+ template<typename _ForwardIterator,
+ typename _UniformRandomNumberGenerator>
+ void
+ __generate(_ForwardIterator __f, _ForwardIterator __t,
+ _UniformRandomNumberGenerator& __urng)
+ { return this->__generate_impl(__f, __t, __urng, this->param()); }
+
+ template<typename _ForwardIterator,
+ typename _UniformRandomNumberGenerator>
+ void
+ __generate(_ForwardIterator __f, _ForwardIterator __t,
+ _UniformRandomNumberGenerator& __urng,
+ const param_type& __p)
+ { return this->__generate_impl(__f, __t, __urng, __p); }
+
+ /**
+ * @brief Return true if two multi-variate normal distributions have
+ * the same parameters and the sequences that would
+ * be generated are equal.
+ */
+ template<size_t _Dimen1, typename _RealType1>
+ friend bool
+ operator==(const
+ __gnu_cxx::normal_mv_distribution<_Dimen1, _RealType1>&
+ __d1,
+ const
+ __gnu_cxx::normal_mv_distribution<_Dimen1, _RealType1>&
+ __d2);
+
+ /**
+ * @brief Inserts a %normal_mv_distribution random number distribution
+ * @p __x into the output stream @p __os.
+ *
+ * @param __os An output stream.
+ * @param __x A %normal_mv_distribution random number distribution.
+ *
+ * @returns The output stream with the state of @p __x inserted or in
+ * an error state.
+ */
+ template<size_t _Dimen1, typename _RealType1,
+ typename _CharT, typename _Traits>
+ friend std::basic_ostream<_CharT, _Traits>&
+ operator<<(std::basic_ostream<_CharT, _Traits>& __os,
+ const
+ __gnu_cxx::normal_mv_distribution<_Dimen1, _RealType1>&
+ __x);
+
+ /**
+ * @brief Extracts a %normal_mv_distribution random number distribution
+ * @p __x from the input stream @p __is.
+ *
+ * @param __is An input stream.
+ * @param __x A %normal_mv_distribution random number generator engine.
+ *
+ * @returns The input stream with @p __x extracted or in an error
+ * state.
+ */
+ template<size_t _Dimen1, typename _RealType1,
+ typename _CharT, typename _Traits>
+ friend std::basic_istream<_CharT, _Traits>&
+ operator>>(std::basic_istream<_CharT, _Traits>& __is,
+ __gnu_cxx::normal_mv_distribution<_Dimen1, _RealType1>&
+ __x);
+
+ private:
+ template<typename _ForwardIterator,
+ typename _UniformRandomNumberGenerator>
+ void
+ __generate_impl(_ForwardIterator __f, _ForwardIterator __t,
+ _UniformRandomNumberGenerator& __urng,
+ const param_type& __p);
+
+ param_type _M_param;
+ std::normal_distribution<_RealType> _M_nd;
+ };
+
+ /**
+ * @brief Return true if two multi-variate normal distributions are
+ * different.
+ */
+ template<size_t _Dimen, typename _RealType>
+ inline bool
+ operator!=(const __gnu_cxx::normal_mv_distribution<_Dimen, _RealType>&
+ __d1,
+ const __gnu_cxx::normal_mv_distribution<_Dimen, _RealType>&
+ __d2)
+ { return !(__d1 == __d2); }
+
+
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
diff --git a/libstdc++-v3/include/ext/random.tcc b/libstdc++-v3/include/ext/random.tcc
index 2a6fde0208f..0fa006af0bd 100644
--- a/libstdc++-v3/include/ext/random.tcc
+++ b/libstdc++-v3/include/ext/random.tcc
@@ -438,6 +438,320 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
return __is;
}
+
+ /**
+ * Iteration method due to M.D. J<o:>hnk.
+ *
+ * M.D. J<o:>hnk, Erzeugung von betaverteilten und gammaverteilten
+ * Zufallszahlen, Metrika, Volume 8, 1964
+ */
+ template<typename _RealType>
+ template<typename _UniformRandomNumberGenerator>
+ typename beta_distribution<_RealType>::result_type
+ beta_distribution<_RealType>::
+ operator()(_UniformRandomNumberGenerator& __urng,
+ const param_type& __param)
+ {
+ std::__detail::_Adaptor<_UniformRandomNumberGenerator, result_type>
+ __aurng(__urng);
+
+ result_type __x, __y;
+ do
+ {
+ __x = std::exp(std::log(__aurng()) / __param.alpha());
+ __y = std::exp(std::log(__aurng()) / __param.beta());
+ }
+ while (__x + __y > result_type(1));
+
+ return __x / (__x + __y);
+ }
+
+ template<typename _RealType>
+ template<typename _OutputIterator,
+ typename _UniformRandomNumberGenerator>
+ void
+ beta_distribution<_RealType>::
+ __generate_impl(_OutputIterator __f, _OutputIterator __t,
+ _UniformRandomNumberGenerator& __urng,
+ const param_type& __param)
+ {
+ __glibcxx_function_requires(_OutputIteratorConcept<_OutputIterator>)
+
+ std::__detail::_Adaptor<_UniformRandomNumberGenerator, result_type>
+ __aurng(__urng);
+
+ while (__f != __t)
+ {
+ result_type __x, __y;
+ do
+ {
+ __x = std::exp(std::log(__aurng()) / __param.alpha());
+ __y = std::exp(std::log(__aurng()) / __param.beta());
+ }
+ while (__x + __y > result_type(1));
+
+ *__f++ = __x / (__x + __y);
+ }
+ }
+
+ template<typename _RealType, typename _CharT, typename _Traits>
+ std::basic_ostream<_CharT, _Traits>&
+ operator<<(std::basic_ostream<_CharT, _Traits>& __os,
+ const __gnu_cxx::beta_distribution<_RealType>& __x)
+ {
+ typedef std::basic_ostream<_CharT, _Traits> __ostream_type;
+ typedef typename __ostream_type::ios_base __ios_base;
+
+ const typename __ios_base::fmtflags __flags = __os.flags();
+ const _CharT __fill = __os.fill();
+ const std::streamsize __precision = __os.precision();
+ const _CharT __space = __os.widen(' ');
+ __os.flags(__ios_base::scientific | __ios_base::left);
+ __os.fill(__space);
+ __os.precision(std::numeric_limits<_RealType>::max_digits10);
+
+ __os << __x.alpha() << __space << __x.beta();
+
+ __os.flags(__flags);
+ __os.fill(__fill);
+ __os.precision(__precision);
+ return __os;
+ }
+
+ template<typename _RealType, typename _CharT, typename _Traits>
+ std::basic_istream<_CharT, _Traits>&
+ operator>>(std::basic_istream<_CharT, _Traits>& __is,
+ __gnu_cxx::beta_distribution<_RealType>& __x)
+ {
+ typedef std::basic_istream<_CharT, _Traits> __istream_type;
+ typedef typename __istream_type::ios_base __ios_base;
+
+ const typename __ios_base::fmtflags __flags = __is.flags();
+ __is.flags(__ios_base::dec | __ios_base::skipws);
+
+ _RealType __alpha_val, __beta_val;
+ __is >> __alpha_val >> __beta_val;
+ __x.param(typename __gnu_cxx::beta_distribution<_RealType>::
+ param_type(__alpha_val, __beta_val));
+
+ __is.flags(__flags);
+ return __is;
+ }
+
+
+ template<std::size_t _Dimen, typename _RealType>
+ template<typename _InputIterator1, typename _InputIterator2>
+ void
+ normal_mv_distribution<_Dimen, _RealType>::param_type::
+ _M_init_full(_InputIterator1 __meanbegin, _InputIterator1 __meanend,
+ _InputIterator2 __varcovbegin, _InputIterator2 __varcovend)
+ {
+ __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
+ __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
+ std::fill(std::copy(__meanbegin, __meanend, _M_mean.begin()),
+ _M_mean.end(), _RealType(0));
+
+ // Perform the Cholesky decomposition
+ auto __w = _M_t.begin();
+ for (size_t __j = 0; __j < _Dimen; ++__j)
+ {
+ _RealType __sum = _RealType(0);
+
+ auto __slitbegin = __w;
+ auto __cit = _M_t.begin();
+ for (size_t __i = 0; __i < __j; ++__i)
+ {
+ auto __slit = __slitbegin;
+ _RealType __s = *__varcovbegin++;
+ for (size_t __k = 0; __k < __i; ++__k)
+ __s -= *__slit++ * *__cit++;
+
+ *__w++ = __s /= *__cit++;
+ __sum += __s * __s;
+ }
+
+ __sum = *__varcovbegin - __sum;
+ if (__builtin_expect(__sum <= _RealType(0), 0))
+ std::__throw_runtime_error(__N("normal_mv_distribution::"
+ "param_type::_M_init_full"));
+ *__w++ = std::sqrt(__sum);
+
+ std::advance(__varcovbegin, _Dimen - __j);
+ }
+ }
+
+ template<std::size_t _Dimen, typename _RealType>
+ template<typename _InputIterator1, typename _InputIterator2>
+ void
+ normal_mv_distribution<_Dimen, _RealType>::param_type::
+ _M_init_lower(_InputIterator1 __meanbegin, _InputIterator1 __meanend,
+ _InputIterator2 __varcovbegin, _InputIterator2 __varcovend)
+ {
+ __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
+ __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
+ std::fill(std::copy(__meanbegin, __meanend, _M_mean.begin()),
+ _M_mean.end(), _RealType(0));
+
+ // Perform the Cholesky decomposition
+ auto __w = _M_t.begin();
+ for (size_t __j = 0; __j < _Dimen; ++__j)
+ {
+ _RealType __sum = _RealType(0);
+
+ auto __slitbegin = __w;
+ auto __cit = _M_t.begin();
+ for (size_t __i = 0; __i < __j; ++__i)
+ {
+ auto __slit = __slitbegin;
+ _RealType __s = *__varcovbegin++;
+ for (size_t __k = 0; __k < __i; ++__k)
+ __s -= *__slit++ * *__cit++;
+
+ *__w++ = __s /= *__cit++;
+ __sum += __s * __s;
+ }
+
+ __sum = *__varcovbegin++ - __sum;
+ if (__builtin_expect(__sum <= _RealType(0), 0))
+ std::__throw_runtime_error(__N("normal_mv_distribution::"
+ "param_type::_M_init_full"));
+ *__w++ = std::sqrt(__sum);
+ }
+ }
+
+ template<std::size_t _Dimen, typename _RealType>
+ template<typename _InputIterator1, typename _InputIterator2>
+ void
+ normal_mv_distribution<_Dimen, _RealType>::param_type::
+ _M_init_diagonal(_InputIterator1 __meanbegin, _InputIterator1 __meanend,
+ _InputIterator2 __varbegin, _InputIterator2 __varend)
+ {
+ __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
+ __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
+ std::fill(std::copy(__meanbegin, __meanend, _M_mean.begin()),
+ _M_mean.end(), _RealType(0));
+
+ auto __w = _M_t.begin();
+ size_t __step = 0;
+ while (__varbegin != __varend)
+ {
+ std::fill_n(__w, __step, _RealType(0));
+ __w += __step++;
+ if (__builtin_expect(*__varbegin < _RealType(0), 0))
+ std::__throw_runtime_error(__N("normal_mv_distribution::"
+ "param_type::_M_init_diagonal"));
+ *__w++ = std::sqrt(*__varbegin++);
+ }
+ }
+
+ template<std::size_t _Dimen, typename _RealType>
+ template<typename _UniformRandomNumberGenerator>
+ typename normal_mv_distribution<_Dimen, _RealType>::result_type
+ normal_mv_distribution<_Dimen, _RealType>::
+ operator()(_UniformRandomNumberGenerator& __urng,
+ const param_type& __param)
+ {
+ result_type __ret;
+
+ for (size_t __i = 0; __i < _Dimen; ++__i)
+ __ret[__i] = _M_nd(__urng);
+
+ auto __t_it = __param._M_t.crbegin();
+ for (size_t __i = _Dimen; __i > 0; --__i)
+ {
+ _RealType __sum = _RealType(0);
+ for (size_t __j = __i; __j > 0; --__j)
+ __sum += __ret[__j - 1] * *__t_it++;
+ __ret[__i - 1] = __sum;
+ }
+
+ return __ret;
+ }
+
+ template<std::size_t _Dimen, typename _RealType>
+ template<typename _ForwardIterator, typename _UniformRandomNumberGenerator>
+ void
+ normal_mv_distribution<_Dimen, _RealType>::
+ __generate_impl(_ForwardIterator __f, _ForwardIterator __t,
+ _UniformRandomNumberGenerator& __urng,
+ const param_type& __param)
+ {
+ __glibcxx_function_requires(_Mutable_ForwardIteratorConcept<
+ _ForwardIterator>)
+ while (__f != __t)
+ *__f++ = this->operator()(__urng, __param);
+ }
+
+ template<size_t _Dimen, typename _RealType>
+ bool
+ operator==(const __gnu_cxx::normal_mv_distribution<_Dimen, _RealType>&
+ __d1,
+ const __gnu_cxx::normal_mv_distribution<_Dimen, _RealType>&
+ __d2)
+ {
+ return __d1._M_param == __d2._M_param && __d1._M_nd == __d2._M_nd;
+ }
+
+ template<size_t _Dimen, typename _RealType, typename _CharT, typename _Traits>
+ std::basic_ostream<_CharT, _Traits>&
+ operator<<(std::basic_ostream<_CharT, _Traits>& __os,
+ const __gnu_cxx::normal_mv_distribution<_Dimen, _RealType>& __x)
+ {
+ typedef std::basic_ostream<_CharT, _Traits> __ostream_type;
+ typedef typename __ostream_type::ios_base __ios_base;
+
+ const typename __ios_base::fmtflags __flags = __os.flags();
+ const _CharT __fill = __os.fill();
+ const std::streamsize __precision = __os.precision();
+ const _CharT __space = __os.widen(' ');
+ __os.flags(__ios_base::scientific | __ios_base::left);
+ __os.fill(__space);
+ __os.precision(std::numeric_limits<_RealType>::max_digits10);
+
+ auto __mean = __x._M_param.mean();
+ for (auto __it : __mean)
+ __os << __it << __space;
+ auto __t = __x._M_param.varcov();
+ for (auto __it : __t)
+ __os << __it << __space;
+
+ __os << __x._M_nd;
+
+ __os.flags(__flags);
+ __os.fill(__fill);
+ __os.precision(__precision);
+ return __os;
+ }
+
+ template<size_t _Dimen, typename _RealType, typename _CharT, typename _Traits>
+ std::basic_istream<_CharT, _Traits>&
+ operator>>(std::basic_istream<_CharT, _Traits>& __is,
+ __gnu_cxx::normal_mv_distribution<_Dimen, _RealType>& __x)
+ {
+ typedef std::basic_istream<_CharT, _Traits> __istream_type;
+ typedef typename __istream_type::ios_base __ios_base;
+
+ const typename __ios_base::fmtflags __flags = __is.flags();
+ __is.flags(__ios_base::dec | __ios_base::skipws);
+
+ std::array<_RealType, _Dimen> __mean;
+ for (auto& __it : __mean)
+ __is >> __it;
+ std::array<_RealType, _Dimen * (_Dimen + 1) / 2> __varcov;
+ for (auto& __it : __varcov)
+ __is >> __it;
+
+ __is >> __x._M_nd;
+
+ __x.param(typename normal_mv_distribution<_Dimen, _RealType>::
+ param_type(__mean.begin(), __mean.end(),
+ __varcov.begin(), __varcov.end()));
+
+ __is.flags(__flags);
+ return __is;
+ }
+
+
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
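
The new extensions are used like the standard <random> distributions: beta_distribution draws from Beta(alpha, beta) via the J<o:>hnk iteration cited above, and normal_mv_distribution takes a mean vector plus a covariance initializer (full matrix, lower triangle, or diagonal) that the param_type Cholesky-factors up front. A short usage sketch follows; it is not part of the patch, and the seed and parameter values are arbitrary.

// Usage sketch for the new __gnu_cxx distributions added by this patch.
// Compile with -std=c++11 against a tree that installs <ext/random>.
#include <ext/random>
#include <random>
#include <array>
#include <iostream>

int
main ()
{
  std::mt19937 gen (42);

  // Beta(2, 5): samples fall in [0, 1].
  __gnu_cxx::beta_distribution<double> beta (2.0, 5.0);
  std::cout << "beta sample: " << beta (gen) << '\n';

  // Bivariate normal: mean (1, -1); the three-element initializer is the
  // lower triangle of the covariance matrix, i.e. var(x0)=1, cov(x0,x1)=0.5,
  // var(x1)=2, which the param_type factors via its Cholesky routine.
  __gnu_cxx::normal_mv_distribution<2> mvn ({1.0, -1.0}, {1.0, 0.5, 2.0});
  std::array<double, 2> v = mvn (gen);
  std::cout << "mv sample: " << v[0] << ' ' << v[1] << '\n';
  return 0;
}
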
diff --git a/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/cons/default.cc b/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/cons/default.cc
new file mode 100644
index 00000000000..3aa2d851e98
--- /dev/null
+++ b/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/cons/default.cc
@@ -0,0 +1,43 @@
+// { dg-options "-std=c++0x" }
+// { dg-require-cstdint "" }
+//
+// 2008-11-24 Edward M. Smith-Rowland <3dw4rd@verizon.net>
+// 2012-09-04 Ulrich Drepper <drepper@gmail.com>
+//
+// Copyright (C) 2012 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <ext/random>
+#include <testsuite_hooks.h>
+
+void
+test01()
+{
+ bool test __attribute__((unused)) = true;
+
+ __gnu_cxx::beta_distribution<> u;
+ VERIFY( u.alpha() == 1.0 );
+ VERIFY( u.beta() == 1.0 );
+ VERIFY( u.min() == 0.0 );
+ VERIFY( u.max() == 1.0 );
+}
+
+int main()
+{
+ test01();
+ return 0;
+}
diff --git a/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/cons/parms.cc b/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/cons/parms.cc
new file mode 100644
index 00000000000..9d6c0b18da7
--- /dev/null
+++ b/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/cons/parms.cc
@@ -0,0 +1,43 @@
+// { dg-options "-std=c++0x" }
+// { dg-require-cstdint "" }
+//
+// 2008-11-24 Edward M. Smith-Rowland <3dw4rd@verizon.net>
+// 2012-09-04 Ulrich Drepper <drepper@gmail.com>
+//
+// Copyright (C) 2012 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <ext/random>
+#include <testsuite_hooks.h>
+
+void
+test01()
+{
+ bool test __attribute__((unused)) = true;
+
+ __gnu_cxx::beta_distribution<> u(1.5, 3.0);
+ VERIFY( u.alpha() == 1.5 );
+ VERIFY( u.beta() == 3.0 );
+ VERIFY( u.min() == 0.0 );
+ VERIFY( u.max() == 1.0 );
+}
+
+int main()
+{
+ test01();
+ return 0;
+}
diff --git a/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/operators/equal.cc b/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/operators/equal.cc
new file mode 100644
index 00000000000..a31a6545d53
--- /dev/null
+++ b/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/operators/equal.cc
@@ -0,0 +1,42 @@
+// { dg-options "-std=c++0x" }
+// { dg-require-cstdint "" }
+//
+// 2010-03-16 Paolo Carlini <paolo.carlini@oracle.com>
+// 2012-09-04 Ulrich Drepper <drepper@gmail.com>
+//
+// Copyright (C) 2012 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <ext/random>
+#include <testsuite_hooks.h>
+
+void
+test01()
+{
+ bool test __attribute__((unused)) = true;
+
+ __gnu_cxx::beta_distribution<double> u(1.5, 3.0), v, w;
+
+ VERIFY( v == w );
+ VERIFY( !(u == v) );
+}
+
+int main()
+{
+ test01();
+ return 0;
+}
diff --git a/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/operators/inequal.cc b/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/operators/inequal.cc
new file mode 100644
index 00000000000..d7eda3935bf
--- /dev/null
+++ b/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/operators/inequal.cc
@@ -0,0 +1,42 @@
+// { dg-options "-std=c++0x" }
+// { dg-require-cstdint "" }
+//
+// 2010-03-16 Paolo Carlini <paolo.carlini@oracle.com>
+// 2012-09-04 Ulrich Drepper <drepper@gmail.com>
+//
+// Copyright (C) 2012 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <ext/random>
+#include <testsuite_hooks.h>
+
+void
+test01()
+{
+ bool test __attribute__((unused)) = true;
+
+ __gnu_cxx::beta_distribution<double> u(1.5, 3.0), v, w;
+
+ VERIFY( u != v );
+ VERIFY( !(v != w) );
+}
+
+int main()
+{
+ test01();
+ return 0;
+}
diff --git a/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/operators/serialize.cc b/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/operators/serialize.cc
new file mode 100644
index 00000000000..dd2fed8d590
--- /dev/null
+++ b/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/operators/serialize.cc
@@ -0,0 +1,44 @@
+// { dg-options "-std=c++0x" }
+// { dg-require-cstdint "" }
+//
+// 2009-08-14 Edward M. Smith-Rowland <3dw4rd@verizon.net>
+// 2012-09-04 Ulrich Drepper <drepper@gmail.com>
+//
+// Copyright (C) 2012 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <ext/random>
+#include <sstream>
+
+void
+test01()
+{
+ std::stringstream str;
+ __gnu_cxx::beta_distribution<double> u(1.5, 3.0), v;
+ std::minstd_rand0 rng;
+
+ u(rng); // advance
+ str << u;
+
+ str >> v;
+}
+
+int main()
+{
+ test01();
+ return 0;
+}
diff --git a/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/requirements/explicit_instantiation/1.cc b/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/requirements/explicit_instantiation/1.cc
new file mode 100644
index 00000000000..a572b1478ae
--- /dev/null
+++ b/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/requirements/explicit_instantiation/1.cc
@@ -0,0 +1,26 @@
+// { dg-do compile }
+// { dg-options "-std=c++11" }
+// { dg-require-cstdint "" }
+//
+// Copyright (C) 2012 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <ext/random>
+
+template class __gnu_cxx::beta_distribution<float>;
+template class __gnu_cxx::beta_distribution<double>;
+template class __gnu_cxx::beta_distribution<long double>;
diff --git a/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/requirements/typedefs.cc b/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/requirements/typedefs.cc
new file mode 100644
index 00000000000..33b18ae535c
--- /dev/null
+++ b/libstdc++-v3/testsuite/26_numerics/random/beta_distribution/requirements/typedefs.cc
@@ -0,0 +1,34 @@
+// { dg-do compile }
+// { dg-options "-std=c++0x" }
+// { dg-require-cstdint "" }
+//
+// 2008-11-24 Edward M. Smith-Rowland <3dw4rd@verizon.net>
+// 2012-09-04 Ulrich Drepper <drepper@gmail.com>
+//
+// Copyright (C) 2012 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <ext/random>
+
+void
+test01()
+{
+ typedef __gnu_cxx::beta_distribution<double> test_type;
+
+ typedef test_type::result_type result_type;
+ typedef test_type::param_type param_type;
+}
diff --git a/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/cons/default.cc b/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/cons/default.cc
new file mode 100644
index 00000000000..a51fde40f03
--- /dev/null
+++ b/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/cons/default.cc
@@ -0,0 +1,49 @@
+// { dg-options "-std=c++0x" }
+// { dg-require-cstdint "" }
+//
+// 2008-11-24 Edward M. Smith-Rowland <3dw4rd@verizon.net>
+// 2012-09-04 Ulrich Drepper <drepper@gmail.com>
+//
+// Copyright (C) 2012 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <ext/random>
+#include <testsuite_hooks.h>
+
+void
+test01()
+{
+ bool test __attribute__((unused)) = true;
+
+ __gnu_cxx::normal_mv_distribution<2> u;
+ VERIFY( u.mean()[0] == 0.0 );
+ VERIFY( u.mean()[1] == 0.0 );
+ VERIFY( u.varcov()[0] == 1.0 );
+ VERIFY( u.varcov()[1] == 0.0 );
+ VERIFY( u.varcov()[2] == 1.0 );
+ typedef __gnu_cxx::normal_mv_distribution<2>::result_type result_type;
+ VERIFY( u.min()[0] == std::numeric_limits<result_type::value_type>::min() );
+ VERIFY( u.max()[0] == std::numeric_limits<result_type::value_type>::max() );
+ VERIFY( u.min()[1] == std::numeric_limits<result_type::value_type>::min() );
+ VERIFY( u.max()[1] == std::numeric_limits<result_type::value_type>::max() );
+}
+
+int main()
+{
+ test01();
+ return 0;
+}
diff --git a/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/cons/parms.cc b/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/cons/parms.cc
new file mode 100644
index 00000000000..d12722183e1
--- /dev/null
+++ b/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/cons/parms.cc
@@ -0,0 +1,49 @@
+// { dg-options "-std=c++0x" }
+// { dg-require-cstdint "" }
+//
+// 2008-11-24 Edward M. Smith-Rowland <3dw4rd@verizon.net>
+// 2012-09-04 Ulrich Drepper <drepper@gmail.com>
+//
+// Copyright (C) 2012 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <ext/random>
+#include <testsuite_hooks.h>
+
+void
+test01()
+{
+ bool test __attribute__((unused)) = true;
+
+ __gnu_cxx::normal_mv_distribution<2> u({5.0, 4.0}, {4.0, 9.0});
+ VERIFY( u.mean()[0] == 5.0 );
+ VERIFY( u.mean()[1] == 4.0 );
+ VERIFY( u.varcov()[0] == 2.0 );
+ VERIFY( u.varcov()[1] == 0.0 );
+ VERIFY( u.varcov()[2] == 3.0 );
+ typedef __gnu_cxx::normal_mv_distribution<2>::result_type result_type;
+ VERIFY( u.min()[0] == std::numeric_limits<result_type::value_type>::min() );
+ VERIFY( u.max()[0] == std::numeric_limits<result_type::value_type>::max() );
+ VERIFY( u.min()[1] == std::numeric_limits<result_type::value_type>::min() );
+ VERIFY( u.max()[1] == std::numeric_limits<result_type::value_type>::max() );
+}
+
+int main()
+{
+ test01();
+ return 0;
+}
diff --git a/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/operators/equal.cc b/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/operators/equal.cc
new file mode 100644
index 00000000000..99b75817947
--- /dev/null
+++ b/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/operators/equal.cc
@@ -0,0 +1,42 @@
+// { dg-options "-std=c++0x" }
+// { dg-require-cstdint "" }
+//
+// 2010-03-16 Paolo Carlini <paolo.carlini@oracle.com>
+// 2012-09-04 Ulrich Drepper <drepper@gmail.com>
+//
+// Copyright (C) 2012 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <ext/random>
+#include <testsuite_hooks.h>
+
+void
+test01()
+{
+ bool test __attribute__((unused)) = true;
+
+ __gnu_cxx::normal_mv_distribution<2,double> u({5.0, 4.0}, {2.0, 1.5}), v, w;
+
+ VERIFY( v == w );
+ VERIFY( !(u == v) );
+}
+
+int main()
+{
+ test01();
+ return 0;
+}
diff --git a/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/operators/inequal.cc b/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/operators/inequal.cc
new file mode 100644
index 00000000000..80472a13214
--- /dev/null
+++ b/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/operators/inequal.cc
@@ -0,0 +1,42 @@
+// { dg-options "-std=c++0x" }
+// { dg-require-cstdint "" }
+//
+// 2010-03-16 Paolo Carlini <paolo.carlini@oracle.com>
+// 2012-09-04 Ulrich Drepper <drepper@gmail.com>
+//
+// Copyright (C) 2012 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <ext/random>
+#include <testsuite_hooks.h>
+
+void
+test01()
+{
+ bool test __attribute__((unused)) = true;
+
+ __gnu_cxx::normal_mv_distribution<2,double> u({3.0, 5.0}, {1.0, 2.0}), v, w;
+
+ VERIFY( u != v );
+ VERIFY( !(v != w) );
+}
+
+int main()
+{
+ test01();
+ return 0;
+}
diff --git a/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/operators/serialize.cc b/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/operators/serialize.cc
new file mode 100644
index 00000000000..3620a9b12bc
--- /dev/null
+++ b/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/operators/serialize.cc
@@ -0,0 +1,44 @@
+// { dg-options "-std=c++0x" }
+// { dg-require-cstdint "" }
+//
+// 2009-08-14 Edward M. Smith-Rowland <3dw4rd@verizon.net>
+// 2012-09-04 Ulrich Drepper <drepper@gmail.com>
+//
+// Copyright (C) 2012 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <ext/random>
+#include <sstream>
+
+void
+test01()
+{
+ std::stringstream str;
+ __gnu_cxx::normal_mv_distribution<2,double> u({1.0, 5.0}, {2.0, 4.0}), v;
+ std::minstd_rand0 rng;
+
+ u(rng); // advance
+ str << u;
+
+ str >> v;
+}
+
+int main()
+{
+ test01();
+ return 0;
+}
diff --git a/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/requirements/explicit_instantiation/1.cc b/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/requirements/explicit_instantiation/1.cc
new file mode 100644
index 00000000000..55c4431a972
--- /dev/null
+++ b/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/requirements/explicit_instantiation/1.cc
@@ -0,0 +1,26 @@
+// { dg-do compile }
+// { dg-options "-std=c++11" }
+// { dg-require-cstdint "" }
+//
+// Copyright (C) 2012 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <ext/random>
+
+template class __gnu_cxx::normal_mv_distribution<2,float>;
+template class __gnu_cxx::normal_mv_distribution<2,double>;
+template class __gnu_cxx::normal_mv_distribution<2,long double>;
diff --git a/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/requirements/typedefs.cc b/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/requirements/typedefs.cc
new file mode 100644
index 00000000000..4a0a14c9020
--- /dev/null
+++ b/libstdc++-v3/testsuite/26_numerics/random/normal_mv_distribution/requirements/typedefs.cc
@@ -0,0 +1,34 @@
+// { dg-do compile }
+// { dg-options "-std=c++0x" }
+// { dg-require-cstdint "" }
+//
+// 2008-11-24 Edward M. Smith-Rowland <3dw4rd@verizon.net>
+// 2012-09-04 Ulrich Drepper <drepper@gmail.com>
+//
+// Copyright (C) 2012 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <ext/random>
+
+void
+test01()
+{
+ typedef __gnu_cxx::normal_mv_distribution<3,double> test_type;
+
+ typedef test_type::result_type result_type;
+ typedef test_type::param_type param_type;
+}