Diffstat (limited to 'src/cmd/compile/internal')
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM64.rules    |  4
-rw-r--r--  src/cmd/compile/internal/ssa/gen/MIPS64.rules   |  3
-rw-r--r--  src/cmd/compile/internal/ssa/gen/PPC64.rules    |  4
-rw-r--r--  src/cmd/compile/internal/ssa/gen/PPC64Ops.go    |  4
-rw-r--r--  src/cmd/compile/internal/ssa/gen/RISCV64.rules  |  2
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM64.go    |  6
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteMIPS64.go   | 24
-rw-r--r--  src/cmd/compile/internal/ssa/rewritePPC64.go    | 25
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteRISCV64.go  | 24
9 files changed, 72 insertions(+), 24 deletions(-)
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index 09e70ad13b..ad99960078 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -316,9 +316,9 @@
(FCMPD x (FMOVDconst [0])) => (FCMPD0 x)
(FCMPD (FMOVDconst [0]) x) => (InvertFlags (FCMPD0 x))
-// CSEL needs a flag-generating argument. Synthesize a CMPW if necessary.
+// CSEL needs a flag-generating argument. Synthesize a TSTW if necessary.
(CondSelect x y boolval) && flagArg(boolval) != nil => (CSEL [boolval.Op] x y flagArg(boolval))
-(CondSelect x y boolval) && flagArg(boolval) == nil => (CSEL [OpARM64NotEqual] x y (CMPWconst [0] boolval))
+(CondSelect x y boolval) && flagArg(boolval) == nil => (CSEL [OpARM64NotEqual] x y (TSTWconst [1] boolval))
(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVDaddr [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDconst [off] ptr)
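
In Go terms, the new ARM64 lowering inspects only bit 0 of the boolean, so junk in the upper bits of the register no longer changes the result. A hedged sketch of what (CSEL [OpARM64NotEqual] x y (TSTWconst [1] b)) computes; the function and parameter names here are illustrative, not compiler code:

// condSelect mirrors CSEL [NotEqual] x y (TSTWconst [1] b): TSTW sets the
// flags from b & 1, and NotEqual selects x when that bit is set.
func condSelect(b uint32, x, y int64) int64 {
	if b&1 != 0 { // only bit 0 is tested; upper bits may be junk
		return x
	}
	return y
}
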
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64.rules b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
index 292ff2fc79..0d6d30fa4c 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS64.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
@@ -392,7 +392,8 @@
(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
-(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...)
+(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
+(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
// checks
(NilCheck ...) => (LoweredNilCheck ...)
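
The MIPS64 change matters because LL sign-extends the loaded 32-bit word into a 64-bit register, so the compare operand must be sign-extended the same way; otherwise a CAS on a value with the high bit set can never succeed. A minimal reproducer sketch of the case the SignExt32to64 addresses:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// 0x80000000 sign-extends to 0xffffffff80000000; without SignExt32to64
	// on the compare operand, the comparison against the LL result would
	// never match on mips64.
	var v uint32 = 0x80000000
	ok := atomic.CompareAndSwapUint32(&v, 0x80000000, 1)
	fmt.Println(ok, v) // with the fix: true 1
}
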
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index a90a3d0937..f83ed78bab 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -562,9 +562,9 @@
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (XORCC x y) yes no)
// Only lower after bool is lowered. It should always lower. This helps ensure the folding below happens reliably.
-(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (CMPWconst [0] bool))
+(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [1] bool)))
// Fold any CR -> GPR -> CR transfers when applying the above rule.
-(ISEL [6] x y (CMPWconst [0] (ISELB [c] one cmp))) => (ISEL [c] x y cmp)
+(ISEL [6] x y (Select1 (ANDCCconst [1] (ISELB [c] one cmp)))) => (ISEL [c] x y cmp)
// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
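
The fold in the second PPC64 rule removes a CR -> GPR -> CR round trip: ISELB materializes a condition-register bit as a 0/1 value in a general register, and without the fold the new ANDCCconst would immediately test that bit just to recreate the flags. Source-level code that plausibly produces the pattern (a hedged example; whether a given branch becomes a CondSelect depends on the compiler's heuristics):

// maxInt64 compiles to a compare (setting a CR bit) feeding a conditional
// select; the fold lets ISEL consume the original CR bit directly instead of
// the materialized 0/1 value.
func maxInt64(a, b int64) int64 {
	if a > b {
		return a
	}
	return b
}
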
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
index 59d8af1a9d..d18cbcc787 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
@@ -11,8 +11,8 @@ import "strings"
// Notes:
// - Less-than-64-bit integer types live in the low portion of registers.
-// For now, the upper portion is junk; sign/zero-extension might be optimized in the future, but not yet.
-// - Boolean types are zero or 1; stored in a byte, but loaded with AMOVBZ so the upper bytes of a register are zero.
+// The upper portion is junk.
+// - Boolean types are zero or 1; stored in a byte, with upper bytes of the register containing junk.
// - *const instructions may use a constant larger than the instruction can encode.
// In this case the assembler expands to multiple instructions and uses tmp
// register (R31).
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
index 7aea622c5e..acef3df389 100644
--- a/src/cmd/compile/internal/ssa/gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
@@ -568,7 +568,7 @@
(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
-(AtomicCompareAndSwap32 ...) => (LoweredAtomicCas32 ...)
+(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
(AtomicExchange32 ...) => (LoweredAtomicExchange32 ...)
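
On riscv64 the reasoning is the same as on mips64: lr.w sign-extends the loaded word. A pseudo-Go sketch of the loop LoweredAtomicCas32 stands for; loadReserved32 and storeConditional32 are hypothetical helpers standing in for lr.w/sc.w, not real APIs:

// Hypothetical stand-ins for lr.w / sc.w; the real lowering emits assembly.
func loadReserved32(ptr *int32) int32 { return *ptr }

func storeConditional32(ptr *int32, v int32) bool { *ptr = v; return true }

// cas32 sketches the LR/SC loop: old arrives already sign-extended to 64 bits,
// matching what lr.w produces, so the 64-bit comparison is meaningful even for
// values with the high bit set.
func cas32(ptr *int32, old int64, new int32) bool {
	for {
		cur := int64(loadReserved32(ptr)) // lr.w sign-extends the loaded word
		if cur != old {
			return false
		}
		if storeConditional32(ptr, new) { // sc.w succeeds if reservation held
			return true
		}
	}
}
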
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 1ee25c2eee..ad1052f88d 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -23409,7 +23409,7 @@ func rewriteValueARM64_OpCondSelect(v *Value) bool {
}
// match: (CondSelect x y boolval)
// cond: flagArg(boolval) == nil
- // result: (CSEL [OpARM64NotEqual] x y (CMPWconst [0] boolval))
+ // result: (CSEL [OpARM64NotEqual] x y (TSTWconst [1] boolval))
for {
x := v_0
y := v_1
@@ -23419,8 +23419,8 @@ func rewriteValueARM64_OpCondSelect(v *Value) bool {
}
v.reset(OpARM64CSEL)
v.AuxInt = opToAuxInt(OpARM64NotEqual)
- v0 := b.NewValue0(v.Pos, OpARM64CMPWconst, types.TypeFlags)
- v0.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(1)
v0.AddArg(boolval)
v.AddArg3(x, y, v0)
return true
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
index 1fbd556b5c..6a0fd3ad6e 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
@@ -52,8 +52,7 @@ func rewriteValueMIPS64(v *Value) bool {
v.Op = OpMIPS64LoweredAtomicAdd64
return true
case OpAtomicCompareAndSwap32:
- v.Op = OpMIPS64LoweredAtomicCas32
- return true
+ return rewriteValueMIPS64_OpAtomicCompareAndSwap32(v)
case OpAtomicCompareAndSwap64:
v.Op = OpMIPS64LoweredAtomicCas64
return true
@@ -697,6 +696,27 @@ func rewriteValueMIPS64_OpAddr(v *Value) bool {
return true
}
}
+func rewriteValueMIPS64_OpAtomicCompareAndSwap32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicCompareAndSwap32 ptr old new mem)
+ // result: (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new := v_2
+ mem := v_3
+ v.reset(OpMIPS64LoweredAtomicCas32)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(old)
+ v.AddArg4(ptr, v0, new, mem)
+ return true
+ }
+}
func rewriteValueMIPS64_OpAvg64u(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index 5a28b9d4f7..c7bcc248fc 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -1167,9 +1167,10 @@ func rewriteValuePPC64_OpCondSelect(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ typ := &b.Func.Config.Types
// match: (CondSelect x y bool)
// cond: flagArg(bool) == nil
- // result: (ISEL [6] x y (CMPWconst [0] bool))
+ // result: (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [1] bool)))
for {
x := v_0
y := v_1
@@ -1179,9 +1180,11 @@ func rewriteValuePPC64_OpCondSelect(v *Value) bool {
}
v.reset(OpPPC64ISEL)
v.AuxInt = int32ToAuxInt(6)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags)
- v0.AuxInt = int32ToAuxInt(0)
- v0.AddArg(bool)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AuxInt = int64ToAuxInt(1)
+ v1.AddArg(bool)
+ v0.AddArg(v1)
v.AddArg3(x, y, v0)
return true
}
@@ -5895,7 +5898,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool {
v.AddArg(y)
return true
}
- // match: (ISEL [6] x y (CMPWconst [0] (ISELB [c] one cmp)))
+ // match: (ISEL [6] x y (Select1 (ANDCCconst [1] (ISELB [c] one cmp))))
// result: (ISEL [c] x y cmp)
for {
if auxIntToInt32(v.AuxInt) != 6 {
@@ -5903,15 +5906,19 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool {
}
x := v_0
y := v_1
- if v_2.Op != OpPPC64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
+ if v_2.Op != OpSelect1 {
break
}
v_2_0 := v_2.Args[0]
- if v_2_0.Op != OpPPC64ISELB {
+ if v_2_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0.AuxInt) != 1 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpPPC64ISELB {
break
}
- c := auxIntToInt32(v_2_0.AuxInt)
- cmp := v_2_0.Args[1]
+ c := auxIntToInt32(v_2_0_0.AuxInt)
+ cmp := v_2_0_0.Args[1]
v.reset(OpPPC64ISEL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg3(x, y, cmp)
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
index 6828d97ff8..b277979061 100644
--- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -61,8 +61,7 @@ func rewriteValueRISCV64(v *Value) bool {
case OpAtomicAnd8:
return rewriteValueRISCV64_OpAtomicAnd8(v)
case OpAtomicCompareAndSwap32:
- v.Op = OpRISCV64LoweredAtomicCas32
- return true
+ return rewriteValueRISCV64_OpAtomicCompareAndSwap32(v)
case OpAtomicCompareAndSwap64:
v.Op = OpRISCV64LoweredAtomicCas64
return true
@@ -765,6 +764,27 @@ func rewriteValueRISCV64_OpAtomicAnd8(v *Value) bool {
return true
}
}
+func rewriteValueRISCV64_OpAtomicCompareAndSwap32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicCompareAndSwap32 ptr old new mem)
+ // result: (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new := v_2
+ mem := v_3
+ v.reset(OpRISCV64LoweredAtomicCas32)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(old)
+ v.AddArg4(ptr, v0, new, mem)
+ return true
+ }
+}
func rewriteValueRISCV64_OpAtomicOr8(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]