Diffstat (limited to 'gcc/config/sparc/sparc.c')
-rw-r--r--  gcc/config/sparc/sparc.c | 150
1 file changed, 146 insertions(+), 4 deletions(-)
diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c
index b315698bc94..713db26ee0b 100644
--- a/gcc/config/sparc/sparc.c
+++ b/gcc/config/sparc/sparc.c
@@ -1160,6 +1160,17 @@ sparc_option_override (void)
gcc_unreachable ();
};
+ if (sparc_memory_model == SMM_DEFAULT)
+ {
+ /* Choose the most relaxed model for the processor. */
+ if (TARGET_V9)
+ sparc_memory_model = SMM_RMO;
+ else if (TARGET_V8)
+ sparc_memory_model = SMM_PSO;
+ else
+ sparc_memory_model = SMM_SC;
+ }
+
#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
target_flags |= MASK_LONG_DOUBLE_128;
@@ -10849,11 +10860,95 @@ sparc_mangle_type (const_tree type)
}
#endif
+/* Expand a membar instruction for various use cases. Both the LOAD_STORE
+ and BEFORE_AFTER arguments are of the form X_Y. They are two-bit masks where
+ bit 0 indicates that X is true, and bit 1 indicates that Y is true. */
+
+void
+sparc_emit_membar_for_model (enum memmodel model,
+ int load_store, int before_after)
+{
+ /* Bits for the MEMBAR mmask field. */
+ const int LoadLoad = 1;
+ const int StoreLoad = 2;
+ const int LoadStore = 4;
+ const int StoreStore = 8;
+
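+ /* MM collects the membar bits required by MODEL; IMPLIED collects the
+ bits already guaranteed by the processor memory model, which are
+ removed from MM before the barrier is emitted. */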
+ int mm = 0, implied = 0;
+
+ switch (sparc_memory_model)
+ {
+ case SMM_SC:
+ /* Sequential Consistency. All memory transactions are immediately
+ visible in sequential execution order. No barriers needed. */
+ implied = LoadLoad | StoreLoad | LoadStore | StoreStore;
+ break;
+
+ case SMM_TSO:
+ /* Total Store Ordering: all memory transactions with store semantics
+ are followed by an implied StoreStore. */
+ implied |= StoreStore;
+ /* FALLTHRU */
+
+ case SMM_PSO:
+ /* Partial Store Ordering: all memory transactions with load semantics
+ are followed by an implied LoadLoad | LoadStore. */
+ implied |= LoadLoad | LoadStore;
+
+ /* If we're not looking for a raw barrier (before+after), then atomic
+ operations get the benefit of being both load and store. */
+ if (load_store == 3 && before_after == 2)
+ implied |= StoreLoad | StoreStore;
+ /* FALLTHRU */
+
+ case SMM_RMO:
+ /* Relaxed Memory Ordering: no implicit bits. */
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
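+ /* Accumulate the bits MODEL requires on each side of the operation,
+ as selected by BEFORE_AFTER. */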
+ if (before_after & 1)
+ {
+ if (model == MEMMODEL_ACQUIRE
+ || model == MEMMODEL_ACQ_REL
+ || model == MEMMODEL_SEQ_CST)
+ {
+ if (load_store & 1)
+ mm |= LoadLoad | LoadStore;
+ if (load_store & 2)
+ mm |= StoreLoad | StoreStore;
+ }
+ }
+ if (before_after & 2)
+ {
+ if (model == MEMMODEL_RELEASE
+ || model == MEMMODEL_ACQ_REL
+ || model == MEMMODEL_SEQ_CST)
+ {
+ if (load_store & 1)
+ mm |= LoadLoad | StoreLoad;
+ if (load_store & 2)
+ mm |= LoadStore | StoreStore;
+ }
+ }
+
+ /* Remove the bits implied by the system memory model. */
+ mm &= ~implied;
+
+ /* For raw barriers (before+after), always emit a barrier.
+ This will become a compile-time barrier if needed. */
+ if (mm || before_after == 3)
+ emit_insn (gen_membar (GEN_INT (mm)));
+}
+
/* Expand code to perform an 8- or 16-bit compare and swap by doing a 32-bit
compare and swap on the word containing the byte or half-word. */
-void
-sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
+static void
+sparc_expand_compare_and_swap_12 (rtx bool_result, rtx result, rtx mem,
+ rtx oldval, rtx newval)
{
rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
rtx addr = gen_reg_rtx (Pmode);
@@ -10878,7 +10973,7 @@ sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
- val = force_reg (SImode, memsi);
+ val = copy_to_reg (memsi);
emit_insn (gen_rtx_SET (VOIDmode, off,
gen_rtx_XOR (SImode, off,
@@ -10924,7 +11019,9 @@ sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
emit_insn (gen_rtx_SET (VOIDmode, newvalue,
gen_rtx_IOR (SImode, newv, val)));
- emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
+ emit_move_insn (bool_result, const1_rtx);
+
+ emit_insn (gen_atomic_compare_and_swapsi_1 (res, memsi, oldvalue, newvalue));
emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
@@ -10932,6 +11029,8 @@ sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
res)));
+ emit_move_insn (bool_result, const0_rtx);
+
cc = gen_compare_reg_1 (NE, resv, val);
emit_insn (gen_rtx_SET (VOIDmode, val, resv));
@@ -10950,6 +11049,49 @@ sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
}
+/* Expand code to perform a compare-and-swap. */
+
+void
+sparc_expand_compare_and_swap (rtx operands[])
+{
+ rtx bval, retval, mem, oldval, newval;
+ enum machine_mode mode;
+ enum memmodel model;
+
+ bval = operands[0];
+ retval = operands[1];
+ mem = operands[2];
+ oldval = operands[3];
+ newval = operands[4];
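+ /* operands[5] is the is_weak flag and operands[7] the failure memory
+ model; only the success model in operands[6] is used here. */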
+ model = (enum memmodel) INTVAL (operands[6]);
+ mode = GET_MODE (mem);
+
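+ /* A compare-and-swap both loads and stores MEM (LOAD_STORE == 3); emit
+ the barrier MODEL requires before the operation (BEFORE_AFTER == 1). */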
+ sparc_emit_membar_for_model (model, 3, 1);
+
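+ /* OLDVAL is still needed after RETVAL has been written, so copy it
+ first if the two overlap. */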
+ if (reg_overlap_mentioned_p (retval, oldval))
+ oldval = copy_to_reg (oldval);
+
+ if (mode == QImode || mode == HImode)
+ sparc_expand_compare_and_swap_12 (bval, retval, mem, oldval, newval);
+ else
+ {
+ rtx (*gen) (rtx, rtx, rtx, rtx);
+ rtx x;
+
+ if (mode == SImode)
+ gen = gen_atomic_compare_and_swapsi_1;
+ else
+ gen = gen_atomic_compare_and_swapdi_1;
+ emit_insn (gen (retval, mem, oldval, newval));
+
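+ /* RETVAL now holds the value MEM contained before the operation; the
+ compare-and-swap succeeded iff that value equals OLDVAL. */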
+ x = emit_store_flag (bval, EQ, retval, oldval, mode, 1, 1);
+ if (x != bval)
+ convert_move (bval, x, 1);
+ }
+
+ sparc_emit_membar_for_model (model, 3, 2);
+}
+
void
sparc_expand_vec_perm_bmask (enum machine_mode vmode, rtx sel)
{