author:    jiwang <jiwang@138bc75d-0d04-0410-961f-82ee72b054a4>  2017-01-20 00:03:20 +0000
committer: jiwang <jiwang@138bc75d-0d04-0410-961f-82ee72b054a4>  2017-01-20 00:03:20 +0000
commit:    06f29de13f48f7da8a8c616108f4e14a1d19b2c8 (patch)
tree:      37f06bdc0b1b1122a695d6a56a818ba288394b1f /gcc/config/aarch64/aarch64.c
parent:    f36ff00fbfcd68207e8a418dc146f198ca8385d6 (diff)
[AArch64][1/4] Support Return address protection on AArch64
gcc/
        * config/aarch64/aarch64-opts.h (aarch64_function_type): New enum.
        * config/aarch64/aarch64-protos.h
        (aarch64_return_address_signing_enabled): New declaration.
        * config/aarch64/aarch64.c (aarch64_return_address_signing_enabled):
        New function.
        (aarch64_expand_prologue): Sign return address before it's pushed onto
        stack.
        (aarch64_expand_epilogue): Authenticate return address fetched from
        stack.
        (aarch64_override_options): Sanity check for ILP32 and ISA level.
        (aarch64_attributes): New function attributes for "sign-return-address".
        * config/aarch64/aarch64.md (UNSPEC_AUTI1716, UNSPEC_AUTISP,
        UNSPEC_PACI1716, UNSPEC_PACISP, UNSPEC_XPACLRI): New unspecs.
        ("*do_return"): Generate combined instructions according to key index.
        ("<pauth_mnem_prefix>sp", "<pauth_mnem_prefix>1716", "xpaclri"): New.
        * config/aarch64/iterators.md (PAUTH_LR_SP, PAUTH_17_16): New integer
        iterators.
        (pauth_mnem_prefix, pauth_hint_num_a): New integer attributes.
        * config/aarch64/aarch64.opt (msign-return-address=): New.
        * doc/extend.texi (AArch64 Function Attributes): Documents
        "sign-return-address=".
        * doc/invoke.texi (AArch64 Options): Documents "-msign-return-address=".

gcc/testsuite/
        * gcc.target/aarch64/return_address_sign_1.c: New testcase for no
        combined instructions.
        * gcc.target/aarch64/return_address_sign_2.c: New testcase for combined
        instructions.
        * gcc.target/aarch64/return_address_sign_3.c: New testcase for disable
        of pointer authentication.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@244666 138bc75d-0d04-0410-961f-82ee72b054a4
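The new option takes the scopes none, non-leaf and all, matching the aarch64_function_type enum added in aarch64-opts.h. A minimal usage sketch (the sample function and command lines are illustrative, not part of the patch):

/* Compile with, for example:
     gcc -O2 -msign-return-address=non-leaf foo.c    (sign functions that push LR)
     gcc -O2 -msign-return-address=all foo.c         (sign every function)
   The sign/authenticate instructions sit in the NOP-compatible HINT space, so
   code built without -march=armv8.3-a still runs on pre-8.3 cores.  */

int
foo (int x)
{
  return x * 2;
}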
Diffstat (limited to 'gcc/config/aarch64/aarch64.c')
-rw-r--r--  gcc/config/aarch64/aarch64.c  |  44
1 file changed, 44 insertions(+), 0 deletions(-)
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 4432cae6b8d..62baf584994 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -3123,6 +3123,22 @@ aarch64_gen_load_pair (machine_mode mode, rtx reg1, rtx mem1, rtx reg2,
}
}
+/* Return TRUE if return address signing should be enabled for the current
+ function, otherwise return FALSE. */
+
+bool
+aarch64_return_address_signing_enabled (void)
+{
+ /* This function should only be called after the frame has been laid out. */
+ gcc_assert (cfun->machine->frame.laid_out);
+
+ /* If the signing scope is AARCH64_FUNCTION_NON_LEAF, we only sign a leaf
+ function if its LR is pushed onto the stack. */
+ return (aarch64_ra_sign_scope == AARCH64_FUNCTION_ALL
+ || (aarch64_ra_sign_scope == AARCH64_FUNCTION_NON_LEAF
+ && cfun->machine->frame.reg_offset[LR_REGNUM] >= 0));
+}
+
/* Emit code to save the callee-saved registers from register number START
to LIMIT to the stack at the location starting at offset START_OFFSET,
skipping any write-back candidates if SKIP_WB is true. */
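/* Illustrative only (not part of this patch): under -msign-return-address=non-leaf
   the predicate added above leaves a leaf function that never saves LR unsigned,
   while a function that makes a call (and therefore pushes LR) is signed.  */

int
leaf (int x)
{
  return x + 1;         /* Leaf, LR not pushed: not signed under "non-leaf".  */
}

int
outer (int x)
{
  return leaf (x) + 1;  /* Calls leaf, so LR is pushed: signed.  */
}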
@@ -3541,6 +3557,10 @@ aarch64_expand_prologue (void)
unsigned reg2 = cfun->machine->frame.wb_candidate2;
rtx_insn *insn;
+ /* Sign return address for functions. */
+ if (aarch64_return_address_signing_enabled ())
+ emit_insn (gen_pacisp ());
+
if (flag_stack_usage_info)
current_function_static_stack_size = frame_size;
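/* A hedged sketch in the spirit of return_address_sign_1.c from this patch
   (the file contents and dg directives below are assumptions, not the actual
   testcase): without ARMv8.3-A the return address is signed in the prologue
   (gen_pacisp) and authenticated by a separate instruction in the epilogue
   (gen_autisp) before the ordinary return.  */

/* { dg-do compile } */
/* { dg-options "-O2 -msign-return-address=all" } */

int
callee (int x)
{
  return x + 1;
}

int
caller (int x)
{
  return callee (x) + 2;
}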
@@ -3677,6 +3697,25 @@ aarch64_expand_epilogue (bool for_sibcall)
RTX_FRAME_RELATED_P (insn) = 1;
}
+ /* We prefer to emit the combined return/authenticate instruction RETAA,
+ however there are three cases in which we must instead emit an explicit
+ authentication instruction.
+
+ 1) Sibcalls don't return in a normal way, so if we're about to call one
+ we must authenticate.
+
+ 2) The RETAA instruction is not available before ARMv8.3-A, so if we are
+ generating code for !TARGET_ARMV8_3 we can't use it and must
+ explicitly authenticate.
+
+ 3) On an eh_return path we make extra stack adjustments to update the
+ canonical frame address to be the exception handler's CFA. We want
+ to authenticate using the CFA of the function which calls eh_return.
+ */
+ if (aarch64_return_address_signing_enabled ()
+ && (for_sibcall || !TARGET_ARMV8_3 || crtl->calls_eh_return))
+ emit_insn (gen_autisp ());
+
/* Stack adjustment for exception handler. */
if (crtl->calls_eh_return)
{
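/* A hedged sketch in the spirit of return_address_sign_2.c (file contents,
   dg directives and the scanned mnemonic are assumptions): with ARMv8.3-A
   available and neither a sibcall nor an eh_return path, authentication is
   folded into the combined return (RETAA) instead of a separate
   authentication instruction.  */

/* { dg-do compile } */
/* { dg-options "-O2 -msign-return-address=all -march=armv8.3-a" } */

int
bar (int x)
{
  return x + 1;
}

/* { dg-final { scan-assembler "retaa" } } */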
@@ -8889,6 +8928,9 @@ aarch64_override_options (void)
error ("Assembler does not support -mabi=ilp32");
#endif
+ if (aarch64_ra_sign_scope != AARCH64_FUNCTION_NONE && TARGET_ILP32)
+ sorry ("Return address signing is only supported for -mabi=lp64");
+
/* Make sure we properly set up the explicit options. */
if ((aarch64_cpu_string && valid_cpu)
|| (aarch64_tune_string && valid_tune))
@@ -9272,6 +9314,8 @@ static const struct aarch64_attribute_info aarch64_attributes[] =
{ "cpu", aarch64_attr_custom, false, aarch64_handle_attr_cpu, OPT_mcpu_ },
{ "tune", aarch64_attr_custom, false, aarch64_handle_attr_tune,
OPT_mtune_ },
+ { "sign-return-address", aarch64_attr_enum, false, NULL,
+ OPT_msign_return_address_ },
{ NULL, aarch64_attr_custom, false, NULL, OPT____ }
};
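The new table entry wires "sign-return-address" into the per-function target attribute machinery, so the signing scope can also be chosen per function instead of only via -msign-return-address=. A sketch of the intended usage (the function bodies are illustrative only):

__attribute__ ((target ("sign-return-address=all")))
int
sensitive (int x)
{
  return x - 1;         /* Always sign this function's return address.  */
}

__attribute__ ((target ("sign-return-address=none")))
int
hot_path (int x)
{
  return x + 1;         /* Opt this function out of signing.  */
}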