summaryrefslogtreecommitdiff
path: root/gcc/config/s390/vx-builtins.md
diff options
context:
space:
mode:
Diffstat (limited to 'gcc/config/s390/vx-builtins.md')
-rw-r--r--gcc/config/s390/vx-builtins.md2081
1 files changed, 2081 insertions, 0 deletions
diff --git a/gcc/config/s390/vx-builtins.md b/gcc/config/s390/vx-builtins.md
new file mode 100644
index 00000000000..35ada1371ff
--- /dev/null
+++ b/gcc/config/s390/vx-builtins.md
@@ -0,0 +1,2081 @@
+;;- Instruction patterns for the System z vector facility builtins.
+;; Copyright (C) 2015 Free Software Foundation, Inc.
+;; Contributed by Andreas Krebbel (Andreas.Krebbel@de.ibm.com)
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it under
+;; the terms of the GNU General Public License as published by the Free
+;; Software Foundation; either version 3, or (at your option) any later
+;; version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+;; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+;; for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+; The patterns in this file are enabled with -mzvector
+
+; Vector modes with 64-bit elements.
+(define_mode_iterator V_HW_64 [V2DI V2DF])
+; Vector modes with 32- or 64-bit elements.
+(define_mode_iterator V_HW_32_64 [V4SI V2DI V2DF])
+; Integer vector modes with 32- or 64-bit elements.
+(define_mode_iterator VI_HW_SD [V4SI V2DI])
+; Vector modes with 16-, 32-, or 64-bit elements.
+(define_mode_iterator V_HW_HSD [V8HI V4SI V2DI V2DF])
+; Integer vector modes with 16-, 32-, or 64-bit elements.
+(define_mode_iterator VI_HW_HSD [V8HI V4SI V2DI])
+
+; The element type of the vector with floating point modes translated
+; to int modes of the same size.
+(define_mode_attr non_vec_int[(V2QI "QI") (V4QI "QI") (V8QI "QI") (V16QI "QI")
+ (V2HI "HI") (V4HI "HI") (V8HI "HI")
+ (V2SI "SI") (V4SI "SI")
+ (V2DI "DI")
+ (V2SF "SI") (V4SF "SI")
+ (V2DF "DI")])
+
+; Condition code modes generated by int comparisons
+(define_mode_iterator VICMP [CCVEQ CCVH CCVHU])
+
+; Comparisons supported by the vec_cmp* builtins
+(define_code_iterator intcmp [eq gt gtu ge geu lt ltu le leu])
+(define_code_iterator fpcmp [eq gt ge lt le])
+
+; Comparisons supported by the vec_all/any* builtins
+(define_code_iterator intcmpcc [eq ne gt ge lt le gtu geu ltu leu])
+(define_code_iterator fpcmpcc [eq ne gt ge unle unlt lt le])
+
+; Flags for vector string instructions (vfae all 4, vfee only ZS and CS, vstrc all 4)
+(define_constants
+ [(VSTRING_FLAG_IN 8) ; invert result
+ (VSTRING_FLAG_RT 4) ; result type
+ (VSTRING_FLAG_ZS 2) ; zero search
+ (VSTRING_FLAG_CS 1)]) ; condition code set
+
+; Rounding modes as being used for e.g. VFI
+; (note: the value 2 is deliberately left without a name here)
+(define_constants
+ [(VEC_RND_CURRENT 0)
+ (VEC_RND_NEAREST_AWAY_FROM_ZERO 1)
+ (VEC_RND_SHORT_PREC 3)
+ (VEC_RND_NEAREST_TO_EVEN 4)
+ (VEC_RND_TO_ZERO 5)
+ (VEC_RND_TO_INF 6)
+ (VEC_RND_TO_MINF 7)])
+
+
+; Vector gather element
+
+; vgef, vgeg -- load a single element into the lane of operand 0
+; selected by immediate operand 4, leaving the other lanes (tied input
+; operand 1) untouched.  The address is formed from the selected
+; element of index vector operand 2 plus base/displacement operand 3.
+(define_insn "vec_gather_element<mode>"
+ [(set (match_operand:V_HW_32_64 0 "register_operand" "=v")
+ (unspec:V_HW_32_64 [(match_operand:V_HW_32_64 1 "register_operand" "0")
+ (match_operand:<tointvec> 2 "register_operand" "v")
+ (match_operand:BLK 3 "memory_operand" "QR")
+ (match_operand:QI 4 "immediate_operand" "C")]
+ UNSPEC_VEC_GATHER))]
+ "TARGET_VX"
+ "vge<bhfgq>\t%0,%O3(%v2,%R3),%b4"
+ [(set_attr "op_type" "VRV")])
+
+; Expand the bit-range mask builtin entirely at compile time into a
+; CONST_VECTOR: every element gets a mask with bits op1..op2 set
+; (big-endian bit numbering); a wrapped range (op1 > op2) produces the
+; complement of the swapped range.
+(define_expand "vec_genmask<mode>"
+ [(match_operand:VI_HW 0 "register_operand" "=v")
+ (match_operand:QI 1 "immediate_operand" "C")
+ (match_operand:QI 2 "immediate_operand" "C")]
+ "TARGET_VX"
+{
+ int nunits = GET_MODE_NUNITS (<VI_HW:MODE>mode);
+ int bitlen = GET_MODE_UNIT_BITSIZE (<VI_HW:MODE>mode);
+ /* Convert to little endian bit numbering.  */
+ int end = bitlen - 1 - INTVAL (operands[1]);
+ int start = bitlen - 1 - INTVAL (operands[2]);
+ rtx const_vec[16];
+ int i;
+ unsigned HOST_WIDE_INT mask;
+ bool swapped_p = false;
+
+ /* Wrapped-around range: build the inner range and complement it below.  */
+ if (start > end)
+ {
+ i = start - 1; start = end + 1; end = i;
+ swapped_p = true;
+ }
+ /* Avoid shifting by the full width of the type (undefined in C).  */
+ if (end == 63)
+ mask = HOST_WIDE_INT_M1U;
+ else
+ mask = (HOST_WIDE_INT_1U << (end + 1)) - 1;
+
+ mask &= ~((HOST_WIDE_INT_1U << start) - 1);
+
+ if (swapped_p)
+ mask = ~mask;
+
+ for (i = 0; i < nunits; i++)
+ const_vec[i] = GEN_INT (trunc_int_for_mode (mask,
+ GET_MODE_INNER (<VI_HW:MODE>mode)));
+
+ emit_insn (gen_rtx_SET (operands[0],
+ gen_rtx_CONST_VECTOR (<VI_HW:MODE>mode,
+ gen_rtvec_v (nunits, const_vec))));
+ DONE;
+})
+
+; Expand the byte-mask builtin into a CONST_VECTOR: bit i (MSB first)
+; of the 16-bit immediate decides whether byte i becomes 0xff or 0x00.
+(define_expand "vec_genbytemaskv16qi"
+ [(match_operand:V16QI 0 "register_operand" "")
+ (match_operand 1 "immediate_operand" "")]
+ "TARGET_VX && CONST_OK_FOR_CONSTRAINT_P (INTVAL (operands[1]), 'K', \"K\")"
+{
+ int i;
+ unsigned mask = 0x8000;
+ rtx const_vec[16];
+ unsigned HOST_WIDE_INT byte_mask = INTVAL (operands[1]);
+
+ for (i = 0; i < 16; i++)
+ {
+ if (mask & byte_mask)
+ const_vec[i] = constm1_rtx;
+ else
+ const_vec[i] = const0_rtx;
+ mask = mask >> 1;
+ }
+ emit_insn (gen_rtx_SET (operands[0],
+ gen_rtx_CONST_VECTOR (V16QImode,
+ gen_rtvec_v (16, const_vec))));
+ DONE;
+})
+
+; Replicate a scalar into every element of the result vector.
+(define_expand "vec_splats<mode>"
+ [(set (match_operand:V_HW 0 "register_operand" "")
+ (vec_duplicate:V_HW (match_operand:<non_vec> 1 "general_operand" "")))]
+ "TARGET_VX")
+
+; Insert scalar operand 2 at position operand 3 into vector operand 1.
+; Note the unspec operand order: value, index, then the source vector.
+(define_expand "vec_insert<mode>"
+ [(set (match_operand:V_HW 0 "register_operand" "")
+ (unspec:V_HW [(match_operand:<non_vec> 2 "register_operand" "")
+ (match_operand:SI 3 "shift_count_or_setmem_operand" "")
+ (match_operand:V_HW 1 "register_operand" "")]
+ UNSPEC_VEC_SET))]
+ "TARGET_VX"
+ "")
+
+; This is vec_set + modulo arithmetic on the element selector (op 2)
+; The destination vector also serves as insertion target (match_dup 0).
+(define_expand "vec_promote<mode>"
+ [(set (match_operand:V_HW 0 "register_operand" "")
+ (unspec:V_HW [(match_operand:<non_vec> 1 "register_operand" "")
+ (match_operand:SI 2 "shift_count_or_setmem_operand" "")
+ (match_dup 0)]
+ UNSPEC_VEC_SET))]
+ "TARGET_VX"
+ "")
+
+; vec_extract is also an RTL standard name -> vector.md
+
+; vllezb, vllezh, vllezf, vllezg -- load one element from memory and
+; zero the remaining elements of the target vector.
+(define_insn "vec_insert_and_zero<mode>"
+ [(set (match_operand:V_HW 0 "register_operand" "=v")
+ (unspec:V_HW [(match_operand:<non_vec> 1 "memory_operand" "QR")]
+ UNSPEC_VEC_INSERT_AND_ZERO))]
+ "TARGET_VX"
+ "vllez<bhfgq>\t%v0,%1"
+ [(set_attr "op_type" "VRX")])
+
+; Vector load to block boundary; the boundary is selected by
+; immediate operand 2.
+(define_insn "vlbb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "QR")
+ (match_operand:HI 2 "immediate_operand" " K")]
+ UNSPEC_VEC_LOAD_BNDRY))]
+ "TARGET_VX"
+ "vlbb\t%v0,%1,%2"
+ [(set_attr "op_type" "VRX")])
+
+; FIXME: The following two patterns might be expressed using vec_merge.
+; But what is the canonical form: (vec_select (vec_merge op0 op1)) or
+; (vec_merge (vec_select op0) (vec_select op1))?
+(define_insn "vec_mergeh<mode>"
+ [(set (match_operand:V_HW 0 "register_operand" "=v")
+ (unspec:V_HW [(match_operand:V_HW 1 "register_operand" "v")
+ (match_operand:V_HW 2 "register_operand" "v")]
+ UNSPEC_VEC_MERGEH))]
+ "TARGET_VX"
+ "vmrh<bhfgq>\t%v0,%1,%2"
+ [(set_attr "op_type" "VRR")])
+
+(define_insn "vec_mergel<mode>"
+ [(set (match_operand:V_HW 0 "register_operand" "=v")
+ (unspec:V_HW [(match_operand:V_HW 1 "register_operand" "v")
+ (match_operand:V_HW 2 "register_operand" "v")]
+ UNSPEC_VEC_MERGEL))]
+ "TARGET_VX"
+ "vmrl<bhfgq>\t%v0,%1,%2"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector pack
+
+; vpkh, vpkf, vpkg -- truncating pack of two wide vectors into one
+; narrow result vector (mode attribute vec_half).
+(define_insn "vec_pack<mode>"
+ [(set (match_operand:<vec_half> 0 "register_operand" "=v")
+ (unspec:<vec_half> [(match_operand:VI_HW_HSD 1 "register_operand" "v")
+ (match_operand:VI_HW_HSD 2 "register_operand" "v")]
+ UNSPEC_VEC_PACK))]
+ "TARGET_VX"
+ "vpk<bhfgq>\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector pack saturate
+
+; vpksh, vpksf, vpksg
+(define_insn "vec_packs<mode>"
+ [(set (match_operand:<vec_half> 0 "register_operand" "=v")
+ (unspec:<vec_half> [(match_operand:VI_HW_HSD 1 "register_operand" "v")
+ (match_operand:VI_HW_HSD 2 "register_operand" "v")]
+ UNSPEC_VEC_PACK_SATURATE))]
+ "TARGET_VX"
+ "vpks<bhfgq>\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+
+; This is vec_packs_cc + loading cc into a caller specified memory location.
+(define_expand "vec_packs_cc<mode>"
+ [(parallel
+ [(set (reg:CCRAW CC_REGNUM)
+ (unspec:CCRAW [(match_operand:VI_HW_HSD 1 "register_operand" "")
+ (match_operand:VI_HW_HSD 2 "register_operand" "")]
+ UNSPEC_VEC_PACK_SATURATE_GENCC))
+ (set (match_operand:<vec_half> 0 "register_operand" "")
+ (unspec:<vec_half> [(match_dup 1) (match_dup 2)]
+ UNSPEC_VEC_PACK_SATURATE_CC))])
+ (set (match_dup 4)
+ (unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))
+ (set (match_operand:SI 3 "memory_operand" "")
+ (match_dup 4))]
+ "TARGET_VX"
+{
+ /* Fresh pseudo holding the CC value before it is stored to op 3.  */
+ operands[4] = gen_reg_rtx (SImode);
+})
+
+(define_insn "*vec_packs_cc<mode>"
+ [(set (reg:CCRAW CC_REGNUM)
+ (unspec:CCRAW [(match_operand:VI_HW_HSD 1 "register_operand" "v")
+ (match_operand:VI_HW_HSD 2 "register_operand" "v")]
+ UNSPEC_VEC_PACK_SATURATE_GENCC))
+ (set (match_operand:<vec_half> 0 "register_operand" "=v")
+ (unspec:<vec_half> [(match_dup 1) (match_dup 2)]
+ UNSPEC_VEC_PACK_SATURATE_CC))]
+ "TARGET_VX"
+ "vpks<bhfgq>s\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector pack logical saturate
+
+; vpklsh, vpklsf, vpklsg
+(define_insn "vec_packsu<mode>"
+ [(set (match_operand:<vec_half> 0 "register_operand" "=v")
+ (unspec:<vec_half> [(match_operand:VI_HW_HSD 1 "register_operand" "v")
+ (match_operand:VI_HW_HSD 2 "register_operand" "v")]
+ UNSPEC_VEC_PACK_UNSIGNED_SATURATE))]
+ "TARGET_VX"
+ "vpkls<bhfgq>\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+; Emulate saturate unsigned pack on signed operands.
+; Zero out negative elements and continue with the unsigned saturating pack.
+; NOTE(review): the clamped values are written back into operands[1]/[2];
+; this assumes the expander always receives fresh pseudos -- confirm.
+(define_expand "vec_packsu_u<mode>"
+ [(set (match_operand:<vec_half> 0 "register_operand" "=v")
+ (unspec:<vec_half> [(match_operand:VI_HW_HSD 1 "register_operand" "v")
+ (match_operand:VI_HW_HSD 2 "register_operand" "v")]
+ UNSPEC_VEC_PACK_UNSIGNED_SATURATE))]
+ "TARGET_VX"
+{
+ rtx null_vec = CONST0_RTX(<MODE>mode);
+ machine_mode half_mode;
+ switch (<MODE>mode)
+ {
+ case V8HImode: half_mode = V16QImode; break;
+ case V4SImode: half_mode = V8HImode; break;
+ case V2DImode: half_mode = V4SImode; break;
+ default: gcc_unreachable ();
+ }
+ s390_expand_vcond (operands[1], operands[1], null_vec,
+ GE, operands[1], null_vec);
+ s390_expand_vcond (operands[2], operands[2], null_vec,
+ GE, operands[2], null_vec);
+ emit_insn (gen_rtx_SET (operands[0],
+ gen_rtx_UNSPEC (half_mode,
+ gen_rtvec (2, operands[1], operands[2]),
+ UNSPEC_VEC_PACK_UNSIGNED_SATURATE)));
+ DONE;
+})
+
+; This is vec_packsu_cc + loading cc into a caller specified memory location.
+; FIXME: The reg to target mem copy should be issued by reload?!
+(define_expand "vec_packsu_cc<mode>"
+ [(parallel
+ [(set (reg:CCRAW CC_REGNUM)
+ (unspec:CCRAW [(match_operand:VI_HW_HSD 1 "register_operand" "")
+ (match_operand:VI_HW_HSD 2 "register_operand" "")]
+ UNSPEC_VEC_PACK_UNSIGNED_SATURATE_GENCC))
+ (set (match_operand:<vec_half> 0 "register_operand" "")
+ (unspec:<vec_half> [(match_dup 1) (match_dup 2)]
+ UNSPEC_VEC_PACK_UNSIGNED_SATURATE_CC))])
+ (set (match_dup 4)
+ (unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))
+ (set (match_operand:SI 3 "memory_operand" "")
+ (match_dup 4))]
+ "TARGET_VX"
+{
+ /* Fresh pseudo holding the CC value before it is stored to op 3.  */
+ operands[4] = gen_reg_rtx (SImode);
+})
+
+(define_insn "*vec_packsu_cc<mode>"
+ [(set (reg:CCRAW CC_REGNUM)
+ (unspec:CCRAW [(match_operand:VI_HW_HSD 1 "register_operand" "v")
+ (match_operand:VI_HW_HSD 2 "register_operand" "v")]
+ UNSPEC_VEC_PACK_UNSIGNED_SATURATE_GENCC))
+ (set (match_operand:<vec_half> 0 "register_operand" "=v")
+ (unspec:<vec_half> [(match_dup 1) (match_dup 2)]
+ UNSPEC_VEC_PACK_UNSIGNED_SATURATE_CC))]
+ "TARGET_VX"
+ "vpkls<bhfgq>s\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector permute
+
+; vec_perm is also RTL standard name, but we can only use it for V16QI
+
+; vperm with a byte-granular V16QI selector vector, made available for
+; all the 16-, 32-, and 64-bit element modes.
+(define_insn "vec_zperm<mode>"
+ [(set (match_operand:V_HW_HSD 0 "register_operand" "=v")
+ (unspec:V_HW_HSD [(match_operand:V_HW_HSD 1 "register_operand" "v")
+ (match_operand:V_HW_HSD 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")]
+ UNSPEC_VEC_PERM))]
+ "TARGET_VX"
+ "vperm\t%v0,%v1,%v2,%v3"
+ [(set_attr "op_type" "VRR")])
+
+; vpdi -- the builtin's 2-bit immediate is remapped to the
+; instruction's 4-bit field: bit 0 stays in place, bit 1 moves to
+; bit position 2.
+(define_expand "vec_permi<mode>"
+ [(set (match_operand:V_HW_64 0 "register_operand" "")
+ (unspec:V_HW_64 [(match_operand:V_HW_64 1 "register_operand" "")
+ (match_operand:V_HW_64 2 "register_operand" "")
+ (match_operand:QI 3 "immediate_operand" "")]
+ UNSPEC_VEC_PERMI))]
+ "TARGET_VX"
+{
+ /* Spread the two selector bits into the instruction's mask field.  */
+ HOST_WIDE_INT val = INTVAL (operands[3]);
+ operands[3] = GEN_INT ((val & 1) | (val & 2) << 1);
+})
+
+(define_insn "*vec_permi<mode>"
+ [(set (match_operand:V_HW_64 0 "register_operand" "=v")
+ (unspec:V_HW_64 [(match_operand:V_HW_64 1 "register_operand" "v")
+ (match_operand:V_HW_64 2 "register_operand" "v")
+ (match_operand:QI 3 "immediate_operand" "C")]
+ UNSPEC_VEC_PERMI))]
+ "TARGET_VX"
+ "vpdi\t%v0,%v1,%v2,%b3"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector replicate
+
+
+; Replicate from vector element
+(define_expand "vec_splat<mode>"
+ [(set (match_operand:V_HW 0 "register_operand" "")
+ (vec_duplicate:V_HW (vec_select:<non_vec>
+ (match_operand:V_HW 1 "register_operand" "")
+ (parallel
+ [(match_operand:QI 2 "immediate_operand" "")]))))]
+ "TARGET_VX")
+
+; Vector scatter element
+
+; vscef, vsceg
+
+; A 64 bit target address generated from 32 bit elements
+(define_insn "vec_scatter_elementv4si_DI"
+ [(set (mem:SI
+ (plus:DI (zero_extend:DI
+ (unspec:SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:DI 3 "immediate_operand" "I")]
+ UNSPEC_VEC_EXTRACT))
+ (match_operand:SI 2 "address_operand" "ZQ")))
+ (unspec:SI [(match_operand:V4SI 0 "register_operand" "v")
+ (match_dup 3)] UNSPEC_VEC_EXTRACT))]
+ "TARGET_VX && TARGET_64BIT"
+ "vscef\t%v0,%O2(%v1,%R2),%3"
+ [(set_attr "op_type" "VRV")])
+
+; A 31 bit target address is generated from 64 bit elements
+; (the subreg with byte offset 4 picks the low SImode part of the
+; extracted DImode index).
+(define_insn "vec_scatter_element<V_HW_64:mode>_SI"
+ [(set (mem:<non_vec>
+ (plus:SI (subreg:SI
+ (unspec:<non_vec_int> [(match_operand:V_HW_64 1 "register_operand" "v")
+ (match_operand:DI 3 "immediate_operand" "I")]
+ UNSPEC_VEC_EXTRACT) 4)
+ (match_operand:SI 2 "address_operand" "ZQ")))
+ (unspec:<non_vec> [(match_operand:V_HW_64 0 "register_operand" "v")
+ (match_dup 3)] UNSPEC_VEC_EXTRACT))]
+ "TARGET_VX && !TARGET_64BIT"
+ "vsce<V_HW_64:bhfgq>\t%v0,%O2(%v1,%R2),%3"
+ [(set_attr "op_type" "VRV")])
+
+; Element size and target address size are the same
+(define_insn "vec_scatter_element<mode>_<non_vec_int>"
+ [(set (mem:<non_vec>
+ (plus:<non_vec_int> (unspec:<non_vec_int>
+ [(match_operand:<tointvec> 1 "register_operand" "v")
+ (match_operand:DI 3 "immediate_operand" "I")]
+ UNSPEC_VEC_EXTRACT)
+ (match_operand:DI 2 "address_operand" "ZQ")))
+ (unspec:<non_vec> [(match_operand:V_HW_32_64 0 "register_operand" "v")
+ (match_dup 3)] UNSPEC_VEC_EXTRACT))]
+ "TARGET_VX"
+ "vsce<bhfgq>\t%v0,%O2(%v1,%R2),%3"
+ [(set_attr "op_type" "VRV")])
+
+; Depending on the address size we have to expand a different pattern.
+; This however cannot be represented in s390-builtins.def so we do the
+; multiplexing here in the expander.
+(define_expand "vec_scatter_element<V_HW_32_64:mode>"
+ [(match_operand:V_HW_32_64 0 "register_operand" "")
+ (match_operand:<tointvec> 1 "register_operand" "")
+ (match_operand 2 "address_operand" "")
+ (match_operand:DI 3 "immediate_operand" "")]
+ "TARGET_VX"
+{
+ if (TARGET_64BIT)
+ {
+ /* Operand 2 is modeless in the builtin; pin its mode down here.  */
+ PUT_MODE (operands[2], DImode);
+ emit_insn (
+ gen_vec_scatter_element<V_HW_32_64:mode>_DI (operands[0], operands[1],
+ operands[2], operands[3]));
+ }
+ else
+ {
+ PUT_MODE (operands[2], SImode);
+ emit_insn (
+ gen_vec_scatter_element<V_HW_32_64:mode>_SI (operands[0], operands[1],
+ operands[2], operands[3]));
+ }
+ DONE;
+})
+
+
+; Vector select
+
+; Operand 3 selects bits from either OP1 (0) or OP2 (1)
+
+; The comparison operator should not matter as long as we always use
+; the same one?!
+
+; Operands 1 and 2 are swapped in order to match the altivec builtin.
+; If operand 3 is a const_int bitmask this would be vec_merge
+(define_expand "vec_sel<mode>"
+ [(set (match_operand:V_HW 0 "register_operand" "")
+ (if_then_else:V_HW
+ (eq (match_operand:<tointvec> 3 "register_operand" "")
+ (match_dup 4))
+ (match_operand:V_HW 2 "register_operand" "")
+ (match_operand:V_HW 1 "register_operand" "")))]
+ "TARGET_VX"
+{
+ /* The mask is compared against a zero vector of the same int mode.  */
+ operands[4] = CONST0_RTX (<tointvec>mode);
+})
+
+
+; Vector sign extend to doubleword
+
+; Sign extend of right most vector element to respective double-word
+; NOTE(review): destination and source use the same vector mode here
+; although vseg widens each element to a doubleword; presumably the
+; builtin-level machinery supplies the correct types -- confirm.
+(define_insn "vec_extend<mode>"
+ [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
+ (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")]
+ UNSPEC_VEC_EXTEND))]
+ "TARGET_VX"
+ "vseg<bhfgq>\t%v0,%1"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector store with length
+
+; Store the bytes of vector OP0 to memory OP2; OP1 gives the index of
+; the highest byte of OP0 to be stored.  Note the unusual operand
+; numbering: the memory destination is operand 2, matching the
+; assembler operand order of vstl.
+(define_insn "vstl<mode>"
+ [(set (match_operand:BLK 2 "memory_operand" "=Q")
+ (unspec:BLK [(match_operand:V 0 "register_operand" "v")
+ (match_operand:SI 1 "register_operand" "d")]
+ UNSPEC_VEC_STORE_LEN))]
+ "TARGET_VX"
+ "vstl\t%v0,%1,%2"
+ [(set_attr "op_type" "VRS")])
+
+
+; Vector unpack high
+
+; vuphb, vuphh, vuphf
+(define_insn "vec_unpackh<mode>"
+ [(set (match_operand:<vec_double> 0 "register_operand" "=v")
+ (unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")]
+ UNSPEC_VEC_UNPACKH))]
+ "TARGET_VX"
+ "vuph<bhfgq>\t%v0,%v1"
+ [(set_attr "op_type" "VRR")])
+
+; vuplhb, vuplhh, vuplhf -- the _l variant is the logical form
+(define_insn "vec_unpackh_l<mode>"
+ [(set (match_operand:<vec_double> 0 "register_operand" "=v")
+ (unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")]
+ UNSPEC_VEC_UNPACKH_L))]
+ "TARGET_VX"
+ "vuplh<bhfgq>\t%v0,%v1"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector unpack low
+
+; vuplb, vuplhw, vuplf
+; (the <w> attribute inserts the extra "w" needed for the halfword
+; variant vuplhw)
+(define_insn "vec_unpackl<mode>"
+ [(set (match_operand:<vec_double> 0 "register_operand" "=v")
+ (unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")]
+ UNSPEC_VEC_UNPACKL))]
+ "TARGET_VX"
+ "vupl<bhfgq><w>\t%v0,%v1"
+ [(set_attr "op_type" "VRR")])
+
+; vupllb, vupllh, vupllf -- the _l variant is the logical form
+(define_insn "vec_unpackl_l<mode>"
+ [(set (match_operand:<vec_double> 0 "register_operand" "=v")
+ (unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")]
+ UNSPEC_VEC_UNPACKL_L))]
+ "TARGET_VX"
+ "vupll<bhfgq>\t%v0,%v1"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector add
+
+; vaq
+
+; zvector builtins use V16QI operands.  So replace the modes in order
+; to map this to a TImode add.  We have to keep the V16QI mode
+; operands in the expander in order to allow some operand type
+; checking when expanding the builtin.
+(define_expand "vec_add_u128"
+ [(match_operand:V16QI 0 "register_operand" "")
+ (match_operand:V16QI 1 "register_operand" "")
+ (match_operand:V16QI 2 "register_operand" "")]
+ "TARGET_VX"
+{
+ /* Reinterpret the V16QI operands as TImode and emit a plain add.  */
+ rtx op0 = gen_rtx_SUBREG (TImode, operands[0], 0);
+ rtx op1 = gen_rtx_SUBREG (TImode, operands[1], 0);
+ rtx op2 = gen_rtx_SUBREG (TImode, operands[2], 0);
+
+ emit_insn (gen_rtx_SET (op0,
+ gen_rtx_PLUS (TImode, op1, op2)));
+ DONE;
+})
+
+; Vector add compute carry
+
+; vaccb, vacch, vaccf, vaccg
+(define_insn "vec_addc<mode>"
+ [(set (match_operand:VI_HW 0 "register_operand" "=v")
+ (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
+ (match_operand:VI_HW 2 "register_operand" "v")]
+ UNSPEC_VEC_ADDC))]
+ "TARGET_VX"
+ "vacc<bhfgq>\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+; 128-bit variant; carry of the quadword addition
+(define_insn "vec_addc_u128"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VEC_ADDC_U128))]
+ "TARGET_VX"
+ "vaccq\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector add with carry
+
+; op3 provides the carry-in
+(define_insn "vec_adde_u128"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")]
+ UNSPEC_VEC_ADDE_U128))]
+ "TARGET_VX"
+ "vacq\t%v0,%v1,%v2,%v3"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector add with carry compute carry
+
+(define_insn "vec_addec_u128"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")]
+ UNSPEC_VEC_ADDEC_U128))]
+ "TARGET_VX"
+ "vacccq\t%v0,%v1,%v2,%v3"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector and
+
+; The following two patterns allow mixed mode and's as required for the intrinsics.
+; ("a" variant: the V2DI operand comes first, "c" variant: second.)
+(define_insn "and_av2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=v")
+ (and:V2DF (subreg:V2DF (match_operand:V2DI 1 "register_operand" "v") 0)
+ (match_operand:V2DF 2 "register_operand" "v")))]
+ "TARGET_VX"
+ "vn\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+(define_insn "and_cv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=v")
+ (and:V2DF (match_operand:V2DF 1 "register_operand" "v")
+ (subreg:V2DF (match_operand:V2DI 2 "register_operand" "v") 0)))]
+ "TARGET_VX"
+ "vn\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector and with complement
+
+; vnc -- operand 2 is the complemented one: op0 = op1 & ~op2
+(define_insn "vec_andc<mode>3"
+ [(set (match_operand:VT_HW 0 "register_operand" "=v")
+ (and:VT_HW (not:VT_HW (match_operand:VT_HW 2 "register_operand" "v"))
+ (match_operand:VT_HW 1 "register_operand" "v")))]
+ "TARGET_VX"
+ "vnc\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+; The following two patterns allow mixed mode and's as required for the intrinsics.
+(define_insn "vec_andc_av2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=v")
+ (and:V2DF (not:V2DF (match_operand:V2DF 2 "register_operand" "v"))
+ (subreg:V2DF (match_operand:V2DI 1 "register_operand" "v") 0)))]
+
+ "TARGET_VX"
+ "vnc\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+(define_insn "vec_andc_cv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=v")
+ (and:V2DF (not:V2DF (subreg:V2DF (match_operand:V2DI 2 "register_operand" "v") 0))
+ (match_operand:V2DF 1 "register_operand" "v")))]
+ "TARGET_VX"
+ "vnc\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector average
+
+; vavgb, vavgh, vavgf, vavgg -- signed average
+(define_insn "vec_avg<mode>"
+ [(set (match_operand:VI_HW 0 "register_operand" "=v")
+ (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
+ (match_operand:VI_HW 2 "register_operand" "v")]
+ UNSPEC_VEC_AVG))]
+ "TARGET_VX"
+ "vavg<bhfgq>\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+; Vector average logical
+
+; vavglb, vavglh, vavglf, vavglg -- logical (unsigned) average
+(define_insn "vec_avgu<mode>"
+ [(set (match_operand:VI_HW 0 "register_operand" "=v")
+ (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
+ (match_operand:VI_HW 2 "register_operand" "v")]
+ UNSPEC_VEC_AVGU))]
+ "TARGET_VX"
+ "vavgl<bhfgq>\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector checksum
+
+; vcksm
+(define_insn "vec_checksum"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VEC_CHECKSUM))]
+ "TARGET_VX"
+ "vcksm\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+;;
+;; Vector compare
+;;
+
+; vec_all/any int compares
+
+; The final argument of s390_expand_vec_compare_cc chooses between
+; all-elements (true) and any-element (false) semantics; the boolean
+; result ends up in SImode operand 0.
+(define_expand "vec_all_<intcmpcc:code><VI_HW:mode>"
+ [(match_operand:SI 0 "register_operand" "")
+ (intcmpcc (match_operand:VI_HW 1 "register_operand" "")
+ (match_operand:VI_HW 2 "register_operand" ""))]
+ "TARGET_VX"
+{
+ s390_expand_vec_compare_cc (operands[0],
+ <intcmpcc:CODE>,
+ operands[1],
+ operands[2],
+ true);
+ DONE;
+})
+
+(define_expand "vec_any_<intcmpcc:code><VI_HW:mode>"
+ [(match_operand:SI 0 "register_operand" "")
+ (intcmpcc (match_operand:VI_HW 1 "register_operand" "")
+ (match_operand:VI_HW 2 "register_operand" ""))]
+ "TARGET_VX"
+{
+ s390_expand_vec_compare_cc (operands[0],
+ <intcmpcc:CODE>,
+ operands[1],
+ operands[2],
+ false);
+ DONE;
+})
+
+; vec_all/any fp compares
+
+(define_expand "vec_all_<fpcmpcc:code>v2df"
+ [(match_operand:SI 0 "register_operand" "")
+ (fpcmpcc (match_operand:V2DF 1 "register_operand" "")
+ (match_operand:V2DF 2 "register_operand" ""))]
+ "TARGET_VX"
+{
+ s390_expand_vec_compare_cc (operands[0],
+ <fpcmpcc:CODE>,
+ operands[1],
+ operands[2],
+ true);
+ DONE;
+})
+
+(define_expand "vec_any_<fpcmpcc:code>v2df"
+ [(match_operand:SI 0 "register_operand" "")
+ (fpcmpcc (match_operand:V2DF 1 "register_operand" "")
+ (match_operand:V2DF 2 "register_operand" ""))]
+ "TARGET_VX"
+{
+ s390_expand_vec_compare_cc (operands[0],
+ <fpcmpcc:CODE>,
+ operands[1],
+ operands[2],
+ false);
+ DONE;
+})
+
+
+; Compare without generating CC
+
+; The RTL templates below are never emitted as-is: the expanders always
+; delegate to s390_expand_vec_compare and finish with DONE.
+(define_expand "vec_cmp<intcmp:code><VI_HW:mode>"
+ [(set (match_operand:VI_HW 0 "register_operand" "=v")
+ (intcmp:VI_HW (match_operand:VI_HW 1 "register_operand" "v")
+ (match_operand:VI_HW 2 "register_operand" "v")))]
+ "TARGET_VX"
+{
+ s390_expand_vec_compare (operands[0], <intcmp:CODE>, operands[1], operands[2]);
+ DONE;
+})
+
+(define_expand "vec_cmp<fpcmp:code>v2df"
+ [(set (match_operand:V2DI 0 "register_operand" "=v")
+ (fpcmp:V2DI (match_operand:V2DF 1 "register_operand" "v")
+ (match_operand:V2DF 2 "register_operand" "v")))]
+ "TARGET_VX"
+{
+ s390_expand_vec_compare (operands[0], <fpcmp:CODE>, operands[1], operands[2]);
+ DONE;
+})
+
+
+; Vector count leading zeros
+
+; vec_cntlz -> clz
+; vec_cnttz -> ctz
+
+; Vector xor
+
+; vec_xor -> xor
+
+; The following two patterns allow mixed mode xor's as required for the intrinsics.
+; ("a" variant: the V2DI operand comes first, "c" variant: second.)
+(define_insn "xor_av2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=v")
+ (xor:V2DF (subreg:V2DF (match_operand:V2DI 1 "register_operand" "v") 0)
+ (match_operand:V2DF 2 "register_operand" "v")))]
+ "TARGET_VX"
+ "vx\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+(define_insn "xor_cv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=v")
+ (xor:V2DF (match_operand:V2DF 1 "register_operand" "v")
+ (subreg:V2DF (match_operand:V2DI 2 "register_operand" "v") 0)))]
+ "TARGET_VX"
+ "vx\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector Galois field multiply sum
+
+; vgfmb, vgfmh, vgfmf
+; NOTE(review): the destination keeps the source mode here although
+; vgfm doubles the element width; compare with vec_gfmsum_accum below
+; which uses <vec_double> -- confirm which form is intended.
+(define_insn "vec_gfmsum<mode>"
+ [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
+ (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
+ (match_operand:VI_HW_QHS 2 "register_operand" "v")]
+ UNSPEC_VEC_GFMSUM))]
+ "TARGET_VX"
+ "vgfm<bhfgq>\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+; vgfmg -- 128-bit result variant with V2DI inputs
+(define_insn "vec_gfmsum_128"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V2DI 1 "register_operand" "v")
+ (match_operand:V2DI 2 "register_operand" "v")]
+ UNSPEC_VEC_GFMSUM_128))]
+ "TARGET_VX"
+ "vgfmg\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+; vgfmab, vgfmah, vgfmaf -- accumulator (op 3) has the doubled element width
+(define_insn "vec_gfmsum_accum<mode>"
+ [(set (match_operand:<vec_double> 0 "register_operand" "=v")
+ (unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")
+ (match_operand:VI_HW_QHS 2 "register_operand" "v")
+ (match_operand:<vec_double> 3 "register_operand" "v")]
+ UNSPEC_VEC_GFMSUM_ACCUM))]
+ "TARGET_VX"
+ "vgfma<bhfgq>\t%v0,%v1,%v2,%v3"
+ [(set_attr "op_type" "VRR")])
+
+(define_insn "vec_gfmsum_accum_128"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V2DI 1 "register_operand" "v")
+ (match_operand:V2DI 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")]
+ UNSPEC_VEC_GFMSUM_ACCUM_128))]
+ "TARGET_VX"
+ "vgfmag\t%v0,%v1,%v2,%v3"
+ [(set_attr "op_type" "VRR")])
+
+
+; FIXME: vec_neg ?
+
+; Vector load positive: vec_abs -> abs
+; Vector maximum vec_max -> smax, logical vec_max -> umax
+; Vector minimum vec_min -> smin, logical vec_min -> umin
+
+
+; Vector multiply and add high
+
+; vec_mladd -> vec_vmal
+; vmalb, vmalh, vmalf, vmalg
+(define_insn "vec_vmal<mode>"
+ [(set (match_operand:VI_HW 0 "register_operand" "=v")
+ (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
+ (match_operand:VI_HW 2 "register_operand" "v")
+ (match_operand:VI_HW 3 "register_operand" "v")]
+ UNSPEC_VEC_VMAL))]
+ "TARGET_VX"
+ "vmal<bhfgq><w>\t%v0,%v1,%v2,%v3"
+ [(set_attr "op_type" "VRR")])
+
+; vec_mhadd -> vec_vmah/vec_vmalh
+
+; vmahb; vmahh, vmahf, vmahg
+(define_insn "vec_vmah<mode>"
+ [(set (match_operand:VI_HW 0 "register_operand" "=v")
+ (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
+ (match_operand:VI_HW 2 "register_operand" "v")
+ (match_operand:VI_HW 3 "register_operand" "v")]
+ UNSPEC_VEC_VMAH))]
+ "TARGET_VX"
+ "vmah<bhfgq>\t%v0,%v1,%v2,%v3"
+ [(set_attr "op_type" "VRR")])
+
+; vmalhb; vmalhh, vmalhf, vmalhg -- logical variant
+(define_insn "vec_vmalh<mode>"
+ [(set (match_operand:VI_HW 0 "register_operand" "=v")
+ (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
+ (match_operand:VI_HW 2 "register_operand" "v")
+ (match_operand:VI_HW 3 "register_operand" "v")]
+ UNSPEC_VEC_VMALH))]
+ "TARGET_VX"
+ "vmalh<bhfgq>\t%v0,%v1,%v2,%v3"
+ [(set_attr "op_type" "VRR")])
+
+; vec_meadd -> vec_vmae/vec_vmale
+
+; vmaeb; vmaeh, vmaef, vmaeg -- accumulator (op 3) has doubled element width
+(define_insn "vec_vmae<mode>"
+ [(set (match_operand:<vec_double> 0 "register_operand" "=v")
+ (unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")
+ (match_operand:VI_HW_QHS 2 "register_operand" "v")
+ (match_operand:<vec_double> 3 "register_operand" "v")]
+ UNSPEC_VEC_VMAE))]
+ "TARGET_VX"
+ "vmae<bhfgq>\t%v0,%v1,%v2,%v3"
+ [(set_attr "op_type" "VRR")])
+
+; vmaleb; vmaleh, vmalef, vmaleg -- logical variant
+(define_insn "vec_vmale<mode>"
+ [(set (match_operand:<vec_double> 0 "register_operand" "=v")
+ (unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")
+ (match_operand:VI_HW_QHS 2 "register_operand" "v")
+ (match_operand:<vec_double> 3 "register_operand" "v")]
+ UNSPEC_VEC_VMALE))]
+ "TARGET_VX"
+ "vmale<bhfgq>\t%v0,%v1,%v2,%v3"
+ [(set_attr "op_type" "VRR")])
+
+; vec_moadd -> vec_vmao/vec_vmalo
+
+; vmaob; vmaoh, vmaof, vmaog
+(define_insn "vec_vmao<mode>"
+ [(set (match_operand:<vec_double> 0 "register_operand" "=v")
+ (unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")
+ (match_operand:VI_HW_QHS 2 "register_operand" "v")
+ (match_operand:<vec_double> 3 "register_operand" "v")]
+ UNSPEC_VEC_VMAO))]
+ "TARGET_VX"
+ "vmao<bhfgq>\t%v0,%v1,%v2,%v3"
+ [(set_attr "op_type" "VRR")])
+
+; vmalob; vmaloh, vmalof, vmalog -- logical variant
+(define_insn "vec_vmalo<mode>"
+ [(set (match_operand:<vec_double> 0 "register_operand" "=v")
+ (unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")
+ (match_operand:VI_HW_QHS 2 "register_operand" "v")
+ (match_operand:<vec_double> 3 "register_operand" "v")]
+ UNSPEC_VEC_VMALO))]
+ "TARGET_VX"
+ "vmalo<bhfgq>\t%v0,%v1,%v2,%v3"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector multiply high
+
+; vec_mulh -> vec_smulh/vec_umulh
+
+; vmhb, vmhh, vmhf -- high part of the signed element product
+(define_insn "vec_smulh<mode>"
+ [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
+ (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
+ (match_operand:VI_HW_QHS 2 "register_operand" "v")]
+ UNSPEC_VEC_SMULT_HI))]
+ "TARGET_VX"
+ "vmh<bhfgq>\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+; vmlhb, vmlhh, vmlhf -- high part of the unsigned element product
+(define_insn "vec_umulh<mode>"
+ [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
+ (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
+ (match_operand:VI_HW_QHS 2 "register_operand" "v")]
+ UNSPEC_VEC_UMULT_HI))]
+ "TARGET_VX"
+ "vmlh<bhfgq>\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+
+; Vector multiply low
+
+; vec_mule -> vec_widen_umult_even/vec_widen_smult_even
+; vec_mulo -> vec_widen_umult_odd/vec_widen_smult_odd
+
+
+; Vector nor
+
+(define_insn "vec_nor<mode>3"
+ [(set (match_operand:VT_HW 0 "register_operand" "=v")
+ (not:VT_HW (ior:VT_HW (match_operand:VT_HW 1 "register_operand" "v")
+ (match_operand:VT_HW 2 "register_operand" "v"))))]
+ "TARGET_VX"
+ "vno\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
+; The following two patterns allow mixed mode and's as required for the intrinsics.
+(define_insn "vec_nor_av2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=v")
+ (not:V2DF (ior:V2DF (subreg:V2DF (match_operand:V2DI 1 "register_operand" "v") 0)
+ (match_operand:V2DF 2 "register_operand" "v"))))]
+ "TARGET_VX"
+ "vno\t%v0,%v1,%v2"
+ [(set_attr "op_type" "VRR")])
+
; Same as vec_nor_av2df3 but with the subreg on the second operand.
(define_insn "vec_nor_cv2df3"
 [(set (match_operand:V2DF 0 "register_operand" "=v")
 (not:V2DF (ior:V2DF (match_operand:V2DF 1 "register_operand" "v")
 (subreg:V2DF (match_operand:V2DI 2 "register_operand" "v") 0))))]
 "TARGET_VX"
 "vno\t%v0,%v1,%v2"
 [(set_attr "op_type" "VRR")])
+
+
+; Vector or
+
; The following two patterns allow mixed mode or's as required for the intrinsics.
; V2DI first operand viewed as V2DF via subreg; vo is mode-agnostic.
(define_insn "ior_av2df3"
 [(set (match_operand:V2DF 0 "register_operand" "=v")
 (ior:V2DF (subreg:V2DF (match_operand:V2DI 1 "register_operand" "v") 0)
 (match_operand:V2DF 2 "register_operand" "v")))]
 "TARGET_VX"
 "vo\t%v0,%v1,%v2"
 [(set_attr "op_type" "VRR")])
+
; Same as ior_av2df3 but with the subreg on the second operand.
(define_insn "ior_cv2df3"
 [(set (match_operand:V2DF 0 "register_operand" "=v")
 (ior:V2DF (match_operand:V2DF 1 "register_operand" "v")
 (subreg:V2DF (match_operand:V2DI 2 "register_operand" "v") 0)))]
 "TARGET_VX"
 "vo\t%v0,%v1,%v2"
 [(set_attr "op_type" "VRR")])
+
+
+; Vector population count vec_popcnt -> popcount
+; Vector element rotate left logical vec_rl -> vrotl, vec_rli -> rot
+
+; Vector element rotate and insert under mask
+
; verimb, verimh, verimf, verimg
; Rotate elements of op 2 left by the immediate op 4, then insert the
; result into op 1 under the bit mask op 3.  Op 1 is also the output
; ("0" constraint), hence it is not printed in the asm template.
(define_insn "verim<mode>"
 [(set (match_operand:VI_HW 0 "register_operand" "=v")
 (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "0")
 (match_operand:VI_HW 2 "register_operand" "v")
 (match_operand:VI_HW 3 "register_operand" "v")
 (match_operand:SI 4 "immediate_operand" "I")]
 UNSPEC_VEC_RL_MASK))]
 "TARGET_VX"
 "verim<bhfgq>\t%v0,%v2,%v3,%b4"
 [(set_attr "op_type" "VRI")])
+
+
+; Vector shift left
+
; Whole-vector shift left; the shift amount comes from op 2, whose
; element mode is independent of op 1's mode (two mode iterators).
(define_insn "vec_sll<VI_HW:mode><VI_HW_QHS:mode>"
 [(set (match_operand:VI_HW 0 "register_operand" "=v")
 (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
 (match_operand:VI_HW_QHS 2 "register_operand" "v")]
 UNSPEC_VEC_SLL))]
 "TARGET_VX"
 "vsl\t%v0,%v1,%v2"
 [(set_attr "op_type" "VRR")])
+
+
+; Vector shift left by byte
+
; Whole-vector shift left by a byte amount taken from op 2 (the int
; vector mode matching op 1's mode).
(define_insn "vec_slb<mode>"
 [(set (match_operand:V_HW 0 "register_operand" "=v")
 (unspec:V_HW [(match_operand:V_HW 1 "register_operand" "v")
 (match_operand:<tointvec> 2 "register_operand" "v")]
 UNSPEC_VEC_SLB))]
 "TARGET_VX"
 "vslb\t%v0,%v1,%v2"
 [(set_attr "op_type" "VRR")])
+
+
+; Vector shift left double by byte
+
; Shift the double-wide concatenation of ops 1 and 2 left by the byte
; count in immediate op 3.
(define_insn "vec_sld<mode>"
 [(set (match_operand:V_HW 0 "register_operand" "=v")
 (unspec:V_HW [(match_operand:V_HW 1 "register_operand" "v")
 (match_operand:V_HW 2 "register_operand" "v")
 (match_operand:DI 3 "immediate_operand" "C")]
 UNSPEC_VEC_SLDB))]
 "TARGET_VX"
 "vsldb\t%v0,%v1,%v2,%b3"
 [(set_attr "op_type" "VRI")])
+
; Word (4-byte) variant of vec_sld: scale the word count into a byte
; count (<< 2) and reuse the vsldb pattern above.
(define_expand "vec_sldw<mode>"
 [(set (match_operand:V_HW 0 "register_operand" "")
 (unspec:V_HW [(match_operand:V_HW 1 "register_operand" "")
 (match_operand:V_HW 2 "register_operand" "")
 (match_operand:DI 3 "immediate_operand" "")]
 UNSPEC_VEC_SLDB))]
 "TARGET_VX"
{
 operands[3] = GEN_INT (INTVAL (operands[3]) << 2);
})
+
+; Vector shift right arithmetic
+
; Whole-vector arithmetic shift right; shift amount from op 2.
(define_insn "vec_sral<VI_HW:mode><VI_HW_QHS:mode>"
 [(set (match_operand:VI_HW 0 "register_operand" "=v")
 (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
 (match_operand:VI_HW_QHS 2 "register_operand" "v")]
 UNSPEC_VEC_SRAL))]
 "TARGET_VX"
 "vsra\t%v0,%v1,%v2"
 [(set_attr "op_type" "VRR")])
+
+
+; Vector shift right arithmetic by byte
+
; Whole-vector arithmetic shift right by a byte amount from op 2.
(define_insn "vec_srab<mode>"
 [(set (match_operand:V_HW 0 "register_operand" "=v")
 (unspec:V_HW [(match_operand:V_HW 1 "register_operand" "v")
 (match_operand:<tointvec> 2 "register_operand" "v")]
 UNSPEC_VEC_SRAB))]
 "TARGET_VX"
 "vsrab\t%v0,%v1,%v2"
 [(set_attr "op_type" "VRR")])
+
+
+; Vector shift right logical
+
; Whole-vector logical shift right; shift amount from op 2.
(define_insn "vec_srl<VI_HW:mode><VI_HW_QHS:mode>"
 [(set (match_operand:VI_HW 0 "register_operand" "=v")
 (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
 (match_operand:VI_HW_QHS 2 "register_operand" "v")]
 UNSPEC_VEC_SRL))]
 "TARGET_VX"
 "vsrl\t%v0,%v1,%v2"
 [(set_attr "op_type" "VRR")])
+
+
+; Vector shift right logical by byte
+
; Pattern definition in vector.md
; Expander only: the matching vsrlb insn lives in vector.md.
(define_expand "vec_srb<mode>"
 [(set (match_operand:V_HW 0 "register_operand" "")
 (unspec:V_HW [(match_operand:V_HW 1 "register_operand" "")
 (match_operand:<tointvec> 2 "register_operand" "")]
 UNSPEC_VEC_SRLB))]
 "TARGET_VX")
+
+
+; Vector subtract
+
; Full 128-bit subtraction; the V16QI operands are just carriers for
; the quadword value (vsq = vector subtract quadword).
(define_insn "vec_sub_u128"
 [(set (match_operand:V16QI 0 "register_operand" "=v")
 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
 (match_operand:V16QI 2 "register_operand" "v")]
 UNSPEC_VEC_SUB_U128))]
 "TARGET_VX"
 "vsq\t%v0,%v1,%v2"
 [(set_attr "op_type" "VRR")])
+
+
+; Vector subtract compute borrow indication
+
; Per-element borrow indication of op1 - op2 (vscbi = subtract compute
; borrow indication).
(define_insn "vec_subc<mode>"
 [(set (match_operand:VI_HW 0 "register_operand" "=v")
 (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")
 (match_operand:VI_HW 2 "register_operand" "v")]
 UNSPEC_VEC_SUBC))]
 "TARGET_VX"
 "vscbi<bhfgq>\t%v0,%v1,%v2"
 [(set_attr "op_type" "VRR")])
+
; 128-bit borrow indication of the quadword subtraction op1 - op2.
(define_insn "vec_subc_u128"
 [(set (match_operand:V16QI 0 "register_operand" "=v")
 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
 (match_operand:V16QI 2 "register_operand" "v")]
 UNSPEC_VEC_SUBC_U128))]
 "TARGET_VX"
 "vscbiq\t%v0,%v1,%v2"
 [(set_attr "op_type" "VRR")])
+
+
+; Vector subtract with borrow indication
+
; 128-bit subtract with a borrow-in taken from op 3.
(define_insn "vec_sube_u128"
 [(set (match_operand:V16QI 0 "register_operand" "=v")
 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
 (match_operand:V16QI 2 "register_operand" "v")
 (match_operand:V16QI 3 "register_operand" "v")]
 UNSPEC_VEC_SUBE_U128))]
 "TARGET_VX"
 "vsbiq\t%v0,%v1,%v2,%v3"
 [(set_attr "op_type" "VRR")])
+
+
+; Vector subtract with borrow compute and borrow indication
+
; Borrow indication of the 128-bit subtract-with-borrow of ops 1-3.
(define_insn "vec_subec_u128"
 [(set (match_operand:V16QI 0 "register_operand" "=v")
 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
 (match_operand:V16QI 2 "register_operand" "v")
 (match_operand:V16QI 3 "register_operand" "v")]
 UNSPEC_VEC_SUBEC_U128))]
 "TARGET_VX"
 "vsbcbiq\t%v0,%v1,%v2,%v3"
 [(set_attr "op_type" "VRR")])
+
+
+; Vector sum across
+
; Sum across DImode parts of the 1st operand and add the rightmost
; element of 2nd operand
; vsumgh, vsumgf
; Expander only; the matching vsumg insn is defined elsewhere.
(define_expand "vec_sum2<mode>"
 [(set (match_operand:V2DI 0 "register_operand" "")
 (unspec:V2DI [(match_operand:VI_HW_HS 1 "register_operand" "")
 (match_operand:VI_HW_HS 2 "register_operand" "")]
 UNSPEC_VEC_VSUMG))]
 "TARGET_VX")
+
; vsumqh, vsumqf
; Quadword sum across; the V2DI destination carries the 128-bit result.
(define_insn "vec_sum_u128<mode>"
 [(set (match_operand:V2DI 0 "register_operand" "=v")
 (unspec:V2DI [(match_operand:VI_HW_SD 1 "register_operand" "v")
 (match_operand:VI_HW_SD 2 "register_operand" "v")]
 UNSPEC_VEC_VSUMQ))]
 "TARGET_VX"
 "vsumq<bhfgq>\t%v0,%v1,%v2"
 [(set_attr "op_type" "VRR")])
+
; vsumb, vsumh
; Sum across SImode parts; expander only, matching vsum insn elsewhere.
(define_expand "vec_sum4<mode>"
 [(set (match_operand:V4SI 0 "register_operand" "")
 (unspec:V4SI [(match_operand:VI_HW_QH 1 "register_operand" "")
 (match_operand:VI_HW_QH 2 "register_operand" "")]
 UNSPEC_VEC_VSUM))]
 "TARGET_VX")
+
+
+; Vector test under mask
+
; vtm followed by extracting the resulting condition code into an int
; (op 0); the *vec_test_mask insn below matches the first set.
(define_expand "vec_test_mask_int<mode>"
 [(set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_operand:V_HW 1 "register_operand" "")
 (match_operand:<tointvec> 2 "register_operand" "")]
 UNSPEC_VEC_TEST_MASK))
 (set (match_operand:SI 0 "register_operand" "")
 (unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
 "TARGET_VX")
+
; Test op 0 under mask op 1; only the condition code is produced.
(define_insn "*vec_test_mask<mode>"
 [(set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_operand:V_HW 0 "register_operand" "v")
 (match_operand:<tointvec> 1 "register_operand" "v")]
 UNSPEC_VEC_TEST_MASK))]
 "TARGET_VX"
 "vtm\t%v0,%v1"
 [(set_attr "op_type" "VRR")])
+
+
+; Vector find any element equal
+
; vfaeb, vfaeh, vfaef
; vfaezb, vfaezh, vfaezf
; Find any element equal.  If the zero-search (ZS) flag is set in the
; immediate, emit the vfaez form and strip ZS from the printed flags
; since it is encoded in the mnemonic.
(define_insn "vfae<mode>"
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
 (match_operand:VI_HW_QHS 2 "register_operand" "v")
 (match_operand:SI 3 "immediate_operand" "C")]
 UNSPEC_VEC_VFAE))]
 "TARGET_VX"
{
 unsigned HOST_WIDE_INT flags = INTVAL (operands[3]);

 if (flags & VSTRING_FLAG_ZS)
 {
 flags &= ~VSTRING_FLAG_ZS;
 operands[3] = GEN_INT (flags);
 return "vfaez<bhfgq>\t%v0,%v1,%v2,%b3";
 }
 return "vfae<bhfgq>\t%v0,%v1,%v2,%b3";
}
[(set_attr "op_type" "VRR")])
+
; vfaebs, vfaehs, vfaefs
; vfaezbs, vfaezhs, vfaezfs
; CC-setting variant of vfae; same ZS-flag handling as above, with the
; CC set modeled via the parallel second set.
(define_insn "*vfaes<mode>"
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
 (match_operand:VI_HW_QHS 2 "register_operand" "v")
 (match_operand:SI 3 "immediate_operand" "C")]
 UNSPEC_VEC_VFAE))
 (set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_dup 1)
 (match_dup 2)
 (match_dup 3)]
 UNSPEC_VEC_VFAECC))]
 "TARGET_VX"
{
 unsigned HOST_WIDE_INT flags = INTVAL (operands[3]);

 if (flags & VSTRING_FLAG_ZS)
 {
 flags &= ~VSTRING_FLAG_ZS;
 operands[3] = GEN_INT (flags);
 return "vfaez<bhfgq>s\t%v0,%v1,%v2,%b3";
 }
 return "vfae<bhfgq>s\t%v0,%v1,%v2,%b3";
}
 [(set_attr "op_type" "VRR")])
+
; Zero-search builtin entry: force the ZS flag on and fall through to
; the vfae insn above.
(define_expand "vfaez<mode>"
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
 (match_operand:VI_HW_QHS 2 "register_operand" "v")
 (match_operand:SI 3 "immediate_operand" "C")]
 UNSPEC_VEC_VFAE))]
 "TARGET_VX"
{
 operands[3] = GEN_INT (INTVAL (operands[3]) | VSTRING_FLAG_ZS);
})
+
; CC-returning builtin entry: force the condition-code-set (CS) flag on
; and store the resulting CC as an int into memory operand 4.
(define_expand "vfaes<mode>"
 [(parallel
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
 (match_operand:VI_HW_QHS 2 "register_operand" "")
 (match_operand:SI 3 "immediate_operand" "C")]
 UNSPEC_VEC_VFAE))
 (set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_dup 1)
 (match_dup 2)
 (match_dup 3)]
 UNSPEC_VEC_VFAECC))])
 (set (match_operand:SI 4 "memory_operand" "")
 (unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
 "TARGET_VX"
{
 operands[3] = GEN_INT (INTVAL (operands[3]) | VSTRING_FLAG_CS);
})
+
; Zero-search + CC-returning builtin entry: force both CS and ZS flags.
(define_expand "vfaezs<mode>"
 [(parallel
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
 (match_operand:VI_HW_QHS 2 "register_operand" "")
 (match_operand:SI 3 "immediate_operand" "C")]
 UNSPEC_VEC_VFAE))
 (set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_dup 1)
 (match_dup 2)
 (match_dup 3)]
 UNSPEC_VEC_VFAECC))])
 (set (match_operand:SI 4 "memory_operand" "")
 (unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
 "TARGET_VX"
{
 operands[3] = GEN_INT (INTVAL (operands[3]) | VSTRING_FLAG_CS | VSTRING_FLAG_ZS);
})
+
+
+; Vector find element equal
+
; vfeebs, vfeehs, vfeefs
; vfeezbs, vfeezhs, vfeezfs
; CC-setting find element equal.  Only ZS and CS may be set; CS is
; implied by the s-form mnemonic, ZS selects the vfeez mnemonic.
(define_insn "*vfees<mode>"
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
 (match_operand:VI_HW_QHS 2 "register_operand" "v")
 (match_operand:QI 3 "immediate_operand" "C")]
 UNSPEC_VEC_VFEE))
 (set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_dup 1)
 (match_dup 2)
 (match_dup 3)]
 UNSPEC_VEC_VFEECC))]
 "TARGET_VX"
{
 unsigned HOST_WIDE_INT flags = INTVAL (operands[3]);

 gcc_assert (!(flags & ~(VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
 flags &= ~VSTRING_FLAG_CS;

 if (flags == VSTRING_FLAG_ZS)
 return "vfeez<bhfgq>s\t%v0,%v1,%v2";
 return "vfee<bhfgq>s\t%v0,%v1,%v2,%b3";
}
 [(set_attr "op_type" "VRR")])
+
; vfeeb, vfeeh, vfeef
; Find element equal, no CC, no flags (immediate M5 field printed as 0).
; Fix: the operands of this define_insn had empty constraint strings,
; giving the register allocator no information; use "=v"/"v" like the
; sibling patterns.
(define_insn "vfee<mode>"
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
 (match_operand:VI_HW_QHS 2 "register_operand" "v")
 (const_int 0)]
 UNSPEC_VEC_VFEE))]
 "TARGET_VX"
 "vfee<bhfgq>\t%v0,%v1,%v2,0"
 [(set_attr "op_type" "VRR")])
+
; vfeezb, vfeezh, vfeezf
; Find element equal with zero search, no CC.
; Fix: the template emitted "vfeez<bhfgq>s\t%v0,%v1,%v2,2" - the
; CC-setting s-form with a stray immediate - although this pattern does
; not model a CC set, so the condition code would have been clobbered
; behind the compiler's back.  Emit the non-CC vfeez form instead
; (consistent with vfenez below) and add the missing "=v"/"v"
; constraints.
(define_insn "vfeez<mode>"
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
 (match_operand:VI_HW_QHS 2 "register_operand" "v")
 (const_int VSTRING_FLAG_ZS)]
 UNSPEC_VEC_VFEE))]
 "TARGET_VX"
 "vfeez<bhfgq>\t%v0,%v1,%v2"
 [(set_attr "op_type" "VRR")])
+
; CC-returning builtin entry for find element equal: CS flag hard-coded,
; resulting CC stored as an int into memory operand 3.
(define_expand "vfees<mode>"
 [(parallel
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
 (match_operand:VI_HW_QHS 2 "register_operand" "")
 (const_int VSTRING_FLAG_CS)]
 UNSPEC_VEC_VFEE))
 (set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_dup 1)
 (match_dup 2)
 (const_int VSTRING_FLAG_CS)]
 UNSPEC_VEC_VFEECC))])
 (set (match_operand:SI 3 "memory_operand" "")
 (unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
 "TARGET_VX")
+
; Zero-search + CC-returning entry; flags (ZS|CS) built in the
; preparation code as operand 4 since the value is not a plain const.
(define_expand "vfeezs<mode>"
 [(parallel
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
 (match_operand:VI_HW_QHS 2 "register_operand" "")
 (match_dup 4)]
 UNSPEC_VEC_VFEE))
 (set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_dup 1)
 (match_dup 2)
 (match_dup 4)]
 UNSPEC_VEC_VFEECC))])
 (set (match_operand:SI 3 "memory_operand" "")
 (unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
 "TARGET_VX"
{
 operands[4] = GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS);
})
+
+; Vector find element not equal
+
; vfeneb, vfeneh, vfenef
; Find element not equal, no CC, no flags.
(define_insn "vfene<mode>"
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
 (match_operand:VI_HW_QHS 2 "register_operand" "v")
 (const_int 0)]
 UNSPEC_VEC_VFENE))]
 "TARGET_VX"
 "vfene<bhfgq>\t%v0,%v1,%v2,0"
 [(set_attr "op_type" "VRR")])
+
+; vec_vfenes can be found in vector.md since it is used for strlen
+
; vfenezb, vfenezh, vfenezf
; Find element not equal with zero search, no CC.
; Fix: the operands of this define_insn had empty constraint strings,
; giving the register allocator no information; use "=v"/"v" like the
; sibling patterns (e.g. vfene above).
(define_insn "vfenez<mode>"
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
 (match_operand:VI_HW_QHS 2 "register_operand" "v")
 (const_int VSTRING_FLAG_ZS)]
 UNSPEC_VEC_VFENE))]
 "TARGET_VX"
 "vfenez<bhfgq>\t%v0,%v1,%v2"
 [(set_attr "op_type" "VRR")])
+
; CC-returning builtin entry for find element not equal; CC stored as
; an int into memory operand 3.
(define_expand "vfenes<mode>"
 [(parallel
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
 (match_operand:VI_HW_QHS 2 "register_operand" "")
 (const_int VSTRING_FLAG_CS)]
 UNSPEC_VEC_VFENE))
 (set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_dup 1)
 (match_dup 2)
 (const_int VSTRING_FLAG_CS)]
 UNSPEC_VEC_VFENECC))])
 (set (match_operand:SI 3 "memory_operand" "")
 (unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
 "TARGET_VX")
+
; Zero-search + CC-returning entry; ZS|CS flags built as operand 4.
(define_expand "vfenezs<mode>"
 [(parallel
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
 (match_operand:VI_HW_QHS 2 "register_operand" "")
 (match_dup 4)]
 UNSPEC_VEC_VFENE))
 (set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_dup 1)
 (match_dup 2)
 (match_dup 4)]
 UNSPEC_VEC_VFENECC))])
 (set (match_operand:SI 3 "memory_operand" "")
 (unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
 "TARGET_VX"
{
 operands[4] = GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS);
})
+
+; Vector isolate string
+
; vistrb, vistrh, vistrf
; Isolate string: elements after the first zero element are zeroed.
(define_insn "vistr<mode>"
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")]
 UNSPEC_VEC_VISTR))]
 "TARGET_VX"
 "vistr<bhfgq>\t%v0,%v1"
 [(set_attr "op_type" "VRR")])
+
; vistrbs, vistrhs, vistrfs
; CC-setting variant of vistr.
(define_insn "*vistrs<mode>"
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")]
 UNSPEC_VEC_VISTR))
 (set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_dup 1)] UNSPEC_VEC_VISTRCC))]
 "TARGET_VX"
 "vistr<bhfgq>s\t%v0,%v1"
 [(set_attr "op_type" "VRR")])
+
; CC-returning builtin entry for vistr; CC stored as an int into memory
; operand 2.
(define_expand "vistrs<mode>"
 [(parallel
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")]
 UNSPEC_VEC_VISTR))
 (set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_dup 1)]
 UNSPEC_VEC_VISTRCC))])
 (set (match_operand:SI 2 "memory_operand" "")
 (unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
 "TARGET_VX")
+
+
+; Vector compare range
+
; vstrcb, vstrch, vstrcf
; vstrczb, vstrczh, vstrczf
; String range compare; as with vfae, a set ZS flag selects the z-form
; mnemonic and is stripped from the printed flag immediate.
(define_insn "vstrc<mode>"
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
 (match_operand:VI_HW_QHS 2 "register_operand" "v")
 (match_operand:VI_HW_QHS 3 "register_operand" "v")
 (match_operand:SI 4 "immediate_operand" "C")]
 UNSPEC_VEC_VSTRC))]
 "TARGET_VX"
{
 unsigned HOST_WIDE_INT flags = INTVAL (operands[4]);

 if (flags & VSTRING_FLAG_ZS)
 {
 flags &= ~VSTRING_FLAG_ZS;
 operands[4] = GEN_INT (flags);
 return "vstrcz<bhfgq>\t%v0,%v1,%v2,%v3,%b4";
 }
 return "vstrc<bhfgq>\t%v0,%v1,%v2,%v3,%b4";
}
[(set_attr "op_type" "VRR")])
+
; vstrcbs, vstrchs, vstrcfs
; vstrczbs, vstrczhs, vstrczfs
; CC-setting variant of vstrc; same ZS-flag handling.
(define_insn "*vstrcs<mode>"
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
 (match_operand:VI_HW_QHS 2 "register_operand" "v")
 (match_operand:VI_HW_QHS 3 "register_operand" "v")
 (match_operand:SI 4 "immediate_operand" "C")]
 UNSPEC_VEC_VSTRC))
 (set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_dup 1)
 (match_dup 2)
 (match_dup 3)
 (match_dup 4)]
 UNSPEC_VEC_VSTRCCC))]
 "TARGET_VX"
{
 unsigned HOST_WIDE_INT flags = INTVAL (operands[4]);

 if (flags & VSTRING_FLAG_ZS)
 {
 flags &= ~VSTRING_FLAG_ZS;
 operands[4] = GEN_INT (flags);
 return "vstrcz<bhfgq>s\t%v0,%v1,%v2,%v3,%b4";
 }
 return "vstrc<bhfgq>s\t%v0,%v1,%v2,%v3,%b4";
}
 [(set_attr "op_type" "VRR")])
+
; Zero-search builtin entry: force the ZS flag on, handled by vstrc.
(define_expand "vstrcz<mode>"
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
 (match_operand:VI_HW_QHS 2 "register_operand" "v")
 (match_operand:VI_HW_QHS 3 "register_operand" "v")
 (match_operand:SI 4 "immediate_operand" "C")]
 UNSPEC_VEC_VSTRC))]
 "TARGET_VX"
{
 operands[4] = GEN_INT (INTVAL (operands[4]) | VSTRING_FLAG_ZS);
})
+
; CC-returning builtin entry: force the CS flag on, store the CC as an
; int into memory operand 5.
(define_expand "vstrcs<mode>"
 [(parallel
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
 (match_operand:VI_HW_QHS 2 "register_operand" "")
 (match_operand:VI_HW_QHS 3 "register_operand" "")
 (match_operand:SI 4 "immediate_operand" "C")]
 UNSPEC_VEC_VSTRC))
 (set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_dup 1)
 (match_dup 2)
 (match_dup 3)
 (match_dup 4)]
 UNSPEC_VEC_VSTRCCC))])
 (set (match_operand:SI 5 "memory_operand" "")
 (unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
 "TARGET_VX"
{
 operands[4] = GEN_INT (INTVAL (operands[4]) | VSTRING_FLAG_CS);
})
+
; Zero-search + CC-returning builtin entry: force both CS and ZS flags.
(define_expand "vstrczs<mode>"
 [(parallel
 [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
 (match_operand:VI_HW_QHS 2 "register_operand" "")
 (match_operand:VI_HW_QHS 3 "register_operand" "")
 (match_operand:SI 4 "immediate_operand" "C")]
 UNSPEC_VEC_VSTRC))
 (set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_dup 1)
 (match_dup 2)
 (match_dup 3)
 (match_dup 4)]
 UNSPEC_VEC_VSTRCCC))])
 (set (match_operand:SI 5 "memory_operand" "")
 (unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
 "TARGET_VX"
{
 operands[4] = GEN_INT (INTVAL (operands[4]) | VSTRING_FLAG_CS | VSTRING_FLAG_ZS);
})
+
+
; Signed V2DI -> V2DF conversion - inexact exception disabled
; The hard-coded "4" in the template is the M4 suppression flag; op 2
; selects the rounding mode.
(define_insn "vec_di_to_df_s64"
 [(set (match_operand:V2DF 0 "register_operand" "=v")
 (unspec:V2DF [(match_operand:V2DI 1 "register_operand" "v")
 (match_operand:QI 2 "immediate_operand" "C")]
 UNSPEC_VEC_VCDGB))]
 "TARGET_VX"
 "vcdgb\t%v0,%v1,4,%b2"
 [(set_attr "op_type" "VRR")])
+
; The result needs to be multiplied with 2**-op2
; Signed int -> double with scaling: convert, then multiply by the
; splatted constant 2**-op2 built in the preparation code.
(define_expand "vec_ctd_s64"
 [(set (match_operand:V2DF 0 "register_operand" "")
 (unspec:V2DF [(match_operand:V2DI 1 "register_operand" "")
 (const_int 0)] ; According to current BFP rounding mode
 UNSPEC_VEC_VCDGB))
 (use (match_operand:QI 2 "immediate_operand" ""))
 (set (match_dup 0) (mult:V2DF (match_dup 0) (match_dup 3)))]
 "TARGET_VX"
{
 REAL_VALUE_TYPE f;
 rtx c;

 real_2expN (&f, -INTVAL (operands[2]), DFmode);
 c = CONST_DOUBLE_FROM_REAL_VALUE (f, DFmode);

 operands[3] = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, c, c));
 operands[3] = force_reg (V2DFmode, operands[3]);
})
+
; Unsigned V2DI -> V2DF conversion - inexact exception disabled
; Logical (unsigned) counterpart of vec_di_to_df_s64.
(define_insn "vec_di_to_df_u64"
 [(set (match_operand:V2DF 0 "register_operand" "=v")
 (unspec:V2DF [(match_operand:V2DI 1 "register_operand" "v")
 (match_operand:QI 2 "immediate_operand" "C")]
 UNSPEC_VEC_VCDLGB))]
 "TARGET_VX"
 "vcdlgb\t%v0,%v1,4,%b2"
 [(set_attr "op_type" "VRR")])
+
; The result needs to be multiplied with 2**-op2
; Unsigned counterpart of vec_ctd_s64: convert, then scale by 2**-op2.
(define_expand "vec_ctd_u64"
 [(set (match_operand:V2DF 0 "register_operand" "")
 (unspec:V2DF [(match_operand:V2DI 1 "register_operand" "")
 (const_int 0)] ; According to current BFP rounding mode
 UNSPEC_VEC_VCDLGB))
 (use (match_operand:QI 2 "immediate_operand" ""))
 (set (match_dup 0) (mult:V2DF (match_dup 0) (match_dup 3)))]
 "TARGET_VX"
{
 REAL_VALUE_TYPE f;
 rtx c;

 real_2expN (&f, -INTVAL (operands[2]), DFmode);
 c = CONST_DOUBLE_FROM_REAL_VALUE (f, DFmode);

 operands[3] = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, c, c));
 operands[3] = force_reg (V2DFmode, operands[3]);
})
+
+
; Signed V2DF -> V2DI conversion - inexact exception disabled
; "4" is the M4 inexact-suppression flag; op 2 selects rounding mode.
(define_insn "vec_df_to_di_s64"
 [(set (match_operand:V2DI 0 "register_operand" "=v")
 (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "v")
 (match_operand:QI 2 "immediate_operand" "C")]
 UNSPEC_VEC_VCGDB))]
 "TARGET_VX"
 "vcgdb\t%v0,%v1,4,%b2"
 [(set_attr "op_type" "VRR")])
+
; The input needs to be multiplied with 2**op2
; Double -> signed int with scaling: multiply the input by the splatted
; constant 2**op2 into a fresh temp (op 4), then convert.
(define_expand "vec_ctsl"
 [(use (match_operand:QI 2 "immediate_operand" ""))
 (set (match_dup 4) (mult:V2DF (match_operand:V2DF 1 "register_operand" "")
 (match_dup 3)))
 (set (match_operand:V2DI 0 "register_operand" "")
 (unspec:V2DI [(match_dup 4) (const_int 0)] ; According to current BFP rounding mode
 UNSPEC_VEC_VCGDB))]
 "TARGET_VX"
{
 REAL_VALUE_TYPE f;
 rtx c;

 real_2expN (&f, INTVAL (operands[2]), DFmode);
 c = CONST_DOUBLE_FROM_REAL_VALUE (f, DFmode);

 operands[3] = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, c, c));
 operands[3] = force_reg (V2DFmode, operands[3]);
 operands[4] = gen_reg_rtx (V2DFmode);
})
+
; Unsigned V2DF -> V2DI conversion - inexact exception disabled
; Logical (unsigned) counterpart of vec_df_to_di_s64.
(define_insn "vec_df_to_di_u64"
 [(set (match_operand:V2DI 0 "register_operand" "=v")
 (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "v")
 (match_operand:QI 2 "immediate_operand" "C")]
 UNSPEC_VEC_VCLGDB))]
 "TARGET_VX"
 "vclgdb\t%v0,%v1,4,%b2"
 [(set_attr "op_type" "VRR")])
+
; The input needs to be multiplied with 2**op2
; Unsigned counterpart of vec_ctsl: scale by 2**op2, then convert.
(define_expand "vec_ctul"
 [(use (match_operand:QI 2 "immediate_operand" ""))
 (set (match_dup 4) (mult:V2DF (match_operand:V2DF 1 "register_operand" "")
 (match_dup 3)))
 (set (match_operand:V2DI 0 "register_operand" "")
 (unspec:V2DI [(match_dup 4) (const_int 0)] ; According to current BFP rounding mode
 UNSPEC_VEC_VCLGDB))]
 "TARGET_VX"
{
 REAL_VALUE_TYPE f;
 rtx c;

 real_2expN (&f, INTVAL (operands[2]), DFmode);
 c = CONST_DOUBLE_FROM_REAL_VALUE (f, DFmode);

 operands[3] = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, c, c));
 operands[3] = force_reg (V2DFmode, operands[3]);
 operands[4] = gen_reg_rtx (V2DFmode);
})
+
; Vector load fp integer - IEEE inexact exception is suppressed
; Op 2 is the M4 field, op 3 the rounding-mode M5 field; the vec_ceil
; etc. expanders below match this pattern with fixed rounding modes.
; NOTE(review): vfidb rounds to an integral FP value, yet the result
; mode here is V2DI rather than V2DF - confirm this is intentional for
; the builtin's declared return type.
(define_insn "vfidb"
 [(set (match_operand:V2DI 0 "register_operand" "=v")
 (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "v")
 (match_operand:QI 2 "immediate_operand" "C")
 (match_operand:QI 3 "immediate_operand" "C")]
 UNSPEC_VEC_VFIDB))]
 "TARGET_VX"
 "vfidb\t%v0,%v1,%b2,%b3"
 [(set_attr "op_type" "VRR")])
+
; Round toward +infinity (ceil), via vfidb.
(define_expand "vec_ceil"
 [(set (match_operand:V2DI 0 "register_operand" "")
 (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "")
 (const_int VEC_RND_TO_INF)]
 UNSPEC_VEC_VFIDB))]
 "TARGET_VX")
+
; Round toward -infinity (floor), via vfidb.
(define_expand "vec_floor"
 [(set (match_operand:V2DI 0 "register_operand" "")
 (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "")
 (const_int VEC_RND_TO_MINF)]
 UNSPEC_VEC_VFIDB))]
 "TARGET_VX")
+
; Round toward zero (truncate), via vfidb.
(define_expand "vec_trunc"
 [(set (match_operand:V2DI 0 "register_operand" "")
 (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "")
 (const_int VEC_RND_TO_ZERO)]
 UNSPEC_VEC_VFIDB))]
 "TARGET_VX")
+
; Round according to the current BFP rounding mode, via vfidb.
(define_expand "vec_roundc"
 [(set (match_operand:V2DI 0 "register_operand" "")
 (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "")
 (const_int VEC_RND_CURRENT)]
 UNSPEC_VEC_VFIDB))]
 "TARGET_VX")
+
; Round to nearest, ties to even, via vfidb.
(define_expand "vec_round"
 [(set (match_operand:V2DI 0 "register_operand" "")
 (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "")
 (const_int VEC_RND_NEAREST_TO_EVEN)]
 UNSPEC_VEC_VFIDB))]
 "TARGET_VX")
+
+
+; Vector load lengthened - V4SF -> V2DF
+
; Widen the even-indexed SF elements of op 1 to DF.
(define_insn "*vldeb"
 [(set (match_operand:V2DF 0 "register_operand" "=v")
 (unspec:V2DF [(match_operand:V4SF 1 "register_operand" "v")]
 UNSPEC_VEC_VLDEB))]
 "TARGET_VX"
 "vldeb\t%v0,%v1"
 [(set_attr "op_type" "VRR")])
+
; Load two adjacent SFmode values from memory (op 1 and op 1 + 4 bytes)
; into even element positions 0 and 2 of a temp V4SF, then widen them
; with vldeb into the V2DF result.
(define_expand "vec_ld2f"
 [; Initialize a vector to all zeroes. FIXME: This should not be
 ; necessary since all elements of the vector will be set anyway.
 ; This is just to make it explicit to the data flow framework.
 (set (match_dup 2) (match_dup 3))
 (set (match_dup 2) (unspec:V4SF [(match_operand:SF 1 "memory_operand" "")
 (const_int 0)
 (match_dup 2)]
 UNSPEC_VEC_SET))
 (set (match_dup 2) (unspec:V4SF [(match_dup 4)
 (const_int 2)
 (match_dup 2)]
 UNSPEC_VEC_SET))
 (set (match_operand:V2DF 0 "register_operand" "")
 (unspec:V2DF [(match_dup 2)] UNSPEC_VEC_VLDEB))]
 "TARGET_VX"
{
 operands[2] = gen_reg_rtx (V4SFmode);
 operands[3] = CONST0_RTX (V4SFmode);
 operands[4] = adjust_address (operands[1], SFmode, 4);
})
+
+
+; Vector load rounded - V2DF -> V4SF
+
; Round the DF elements of op 1 down to SF, placed in even element
; positions of the result (trailing 0,0 = M-field flags).
(define_insn "*vledb"
 [(set (match_operand:V4SF 0 "register_operand" "=v")
 (unspec:V4SF [(match_operand:V2DF 1 "register_operand" "v")]
 UNSPEC_VEC_VLEDB))]
 "TARGET_VX"
 "vledb\t%v0,%v1,0,0"
 [(set_attr "op_type" "VRR")])
+
; Round the two DF elements of op 0 to SF (vledb leaves them in even
; element positions 0 and 2) and store them to op 1 and op 1 + 4 bytes.
(define_expand "vec_st2f"
 [(set (match_dup 2)
 (unspec:V4SF [(match_operand:V2DF 0 "register_operand" "")]
 UNSPEC_VEC_VLEDB))
 (set (match_operand:SF 1 "memory_operand" "")
 (unspec:SF [(match_dup 2) (const_int 0)] UNSPEC_VEC_EXTRACT))
 (set (match_dup 3)
 (unspec:SF [(match_dup 2) (const_int 2)] UNSPEC_VEC_EXTRACT))]
 "TARGET_VX"
{
 operands[2] = gen_reg_rtx (V4SFmode);
 operands[3] = adjust_address (operands[1], SFmode, 4);
})
+
+
+; Vector load negated fp
+
; Negative absolute value; expander only, matched by patterns elsewhere.
(define_expand "vec_nabs"
 [(set (match_operand:V2DF 0 "register_operand" "")
 (neg:V2DF (abs:V2DF (match_operand:V2DF 1 "register_operand" ""))))]
 "TARGET_VX")
+
+; Vector square root fp vec_sqrt -> sqrt rtx standard name
+
+; Vector FP test data class immediate
+
; Test data class: op 2 is the class mask; produces both a vector result
; and a condition code.
(define_insn "*vftcidb"
 [(set (match_operand:V2DF 0 "register_operand" "=v")
 (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "v")
 (match_operand:SI 2 "immediate_operand" "J")]
 UNSPEC_VEC_VFTCIDB))
 (set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_dup 1) (match_dup 2)] UNSPEC_VEC_VFTCIDBCC))]
 "TARGET_VX"
 "vftcidb\t%v0,%v1,%x2"
 [(set_attr "op_type" "VRR")])
+
; CC-only variant: the vector result is not needed, so it becomes a
; scratch register.
(define_insn "*vftcidb_cconly"
 [(set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_operand:V2DF 1 "register_operand" "v")
 (match_operand:SI 2 "immediate_operand" "J")]
 UNSPEC_VEC_VFTCIDBCC))
 (clobber (match_scratch:V2DI 0 "=v"))]
 "TARGET_VX"
 "vftcidb\t%v0,%v1,%x2"
 [(set_attr "op_type" "VRR")])
+
; Builtin entry: test data class and store the resulting CC as an int
; into memory operand 3.
(define_expand "vftcidb"
 [(parallel
 [(set (match_operand:V2DF 0 "register_operand" "")
 (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "")
 (match_operand:SI 2 "immediate_operand" "")]
 UNSPEC_VEC_VFTCIDB))
 (set (reg:CCRAW CC_REGNUM)
 (unspec:CCRAW [(match_dup 1) (match_dup 2)] UNSPEC_VEC_VFTCIDBCC))])
 (set (match_operand:SI 3 "memory_operand" "")
 (unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
 "TARGET_VX")
+
+;;
+;; Integer compares
+;;
+
+; All comparisons which produce a CC need fully populated (VI_HW)
+; vector arguments. Otherwise the any/all CCs would be just bogus.
+
; CC-only integer compare; the vector result of the s-form compare goes
; to a scratch register.
(define_insn "*vec_cmp<VICMP:insn_cmp><VI_HW:mode>_cconly"
 [(set (reg:VICMP CC_REGNUM)
 (compare:VICMP (match_operand:VI_HW 0 "register_operand" "v")
 (match_operand:VI_HW 1 "register_operand" "v")))
 (clobber (match_scratch:VI_HW 2 "=v"))]
 "TARGET_VX"
 "vc<VICMP:insn_cmp><VI_HW:bhfgq>s\t%v2,%v0,%v1"
 [(set_attr "op_type" "VRR")])
+
+; FIXME: The following 2x3 definitions should be merged into 2 with
+; VICMP like above but I could not find a way to set the comparison
+; operator (eq) depending on the mode CCVEQ (mode_iterator). Or the
+; other way around - setting the mode depending on the code
+; (code_iterator).
; Compare equal with CC result stored as an int into memory operand 3.
(define_expand "vec_cmpeq<VI_HW:mode>_cc"
 [(parallel
 [(set (reg:CCVEQ CC_REGNUM)
 (compare:CCVEQ (match_operand:VI_HW 1 "register_operand" "v")
 (match_operand:VI_HW 2 "register_operand" "v")))
 (set (match_operand:VI_HW 0 "register_operand" "=v")
 (eq:VI_HW (match_dup 1) (match_dup 2)))])
 (set (match_operand:SI 3 "memory_operand" "")
 (unspec:SI [(reg:CCVEQ CC_REGNUM)] UNSPEC_CC_TO_INT))]
 "TARGET_VX")
+
; Signed compare greater-than with CC stored into memory operand 3.
(define_expand "vec_cmph<VI_HW:mode>_cc"
 [(parallel
 [(set (reg:CCVH CC_REGNUM)
 (compare:CCVH (match_operand:VI_HW 1 "register_operand" "v")
 (match_operand:VI_HW 2 "register_operand" "v")))
 (set (match_operand:VI_HW 0 "register_operand" "=v")
 (gt:VI_HW (match_dup 1) (match_dup 2)))])
 (set (match_operand:SI 3 "memory_operand" "")
 (unspec:SI [(reg:CCVH CC_REGNUM)] UNSPEC_CC_TO_INT))]
 "TARGET_VX")
+
; Unsigned compare greater-than with CC stored into memory operand 3.
(define_expand "vec_cmphl<VI_HW:mode>_cc"
 [(parallel
 [(set (reg:CCVHU CC_REGNUM)
 (compare:CCVHU (match_operand:VI_HW 1 "register_operand" "v")
 (match_operand:VI_HW 2 "register_operand" "v")))
 (set (match_operand:VI_HW 0 "register_operand" "=v")
 (gtu:VI_HW (match_dup 1) (match_dup 2)))])
 (set (match_operand:SI 3 "memory_operand" "")
 (unspec:SI [(reg:CCVHU CC_REGNUM)] UNSPEC_CC_TO_INT))]
 "TARGET_VX")
+
+
; Matching insn for the vec_cmpeq_cc expander.
(define_insn "*vec_cmpeq<VI_HW:mode>_cc"
 [(set (reg:CCVEQ CC_REGNUM)
 (compare:CCVEQ (match_operand:VI_HW 0 "register_operand" "v")
 (match_operand:VI_HW 1 "register_operand" "v")))
 (set (match_operand:VI_HW 2 "register_operand" "=v")
 (eq:VI_HW (match_dup 0) (match_dup 1)))]
 "TARGET_VX"
 "vceq<VI_HW:bhfgq>s\t%v2,%v0,%v1"
 [(set_attr "op_type" "VRR")])
+
; Matching insn for the vec_cmph_cc expander (signed greater-than).
(define_insn "*vec_cmph<VI_HW:mode>_cc"
 [(set (reg:CCVH CC_REGNUM)
 (compare:CCVH (match_operand:VI_HW 0 "register_operand" "v")
 (match_operand:VI_HW 1 "register_operand" "v")))
 (set (match_operand:VI_HW 2 "register_operand" "=v")
 (gt:VI_HW (match_dup 0) (match_dup 1)))]
 "TARGET_VX"
 "vch<VI_HW:bhfgq>s\t%v2,%v0,%v1"
 [(set_attr "op_type" "VRR")])
+
; Matching insn for the vec_cmphl_cc expander (unsigned greater-than).
(define_insn "*vec_cmphl<VI_HW:mode>_cc"
 [(set (reg:CCVHU CC_REGNUM)
 (compare:CCVHU (match_operand:VI_HW 0 "register_operand" "v")
 (match_operand:VI_HW 1 "register_operand" "v")))
 (set (match_operand:VI_HW 2 "register_operand" "=v")
 (gtu:VI_HW (match_dup 0) (match_dup 1)))]
 "TARGET_VX"
 "vchl<VI_HW:bhfgq>s\t%v2,%v0,%v1"
 [(set_attr "op_type" "VRR")])
+
+;;
;; Floating point compares
+;;
+
; CC-only FP compare; the vector result goes to a scratch register.
(define_insn "*vec_cmp<insn_cmp>v2df_cconly"
 [(set (reg:VFCMP CC_REGNUM)
 (compare:VFCMP (match_operand:V2DF 0 "register_operand" "v")
 (match_operand:V2DF 1 "register_operand" "v")))
 (clobber (match_scratch:V2DI 2 "=v"))]
 "TARGET_VX"
 "vfc<asm_fcmp>dbs\t%v2,%v0,%v1"
 [(set_attr "op_type" "VRR")])
+
+; FIXME: Merge the following 2x3 patterns with VFCMP
+; Expander: V2DF equality compare delivering both the V2DI result mask
+; (operand 0) and the condition code, which is stored as an int into
+; the memory at operand 3 via UNSPEC_CC_TO_INT.  The parallel is
+; matched by *vec_cmpeqv2df_cc below (vfcedbs).
+(define_expand "vec_cmpeqv2df_cc"
+  [(parallel
+    [(set (reg:CCVEQ CC_REGNUM)
+	  (compare:CCVEQ (match_operand:V2DF 1 "register_operand" "v")
+			 (match_operand:V2DF 2 "register_operand" "v")))
+     (set (match_operand:V2DI 0 "register_operand" "=v")
+	  (eq:V2DI (match_dup 1) (match_dup 2)))])
+   (set (match_operand:SI 3 "memory_operand" "")
+	(unspec:SI [(reg:CCVEQ CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX")
+
+; Expander: V2DF greater-than compare delivering both the V2DI result
+; mask (operand 0) and the condition code, stored as an int into the
+; memory at operand 3 via UNSPEC_CC_TO_INT.  The parallel is matched
+; by *vec_cmphv2df_cc below (vfchdbs).
+(define_expand "vec_cmphv2df_cc"
+  [(parallel
+    [(set (reg:CCVH CC_REGNUM)
+	  (compare:CCVH (match_operand:V2DF 1 "register_operand" "v")
+			(match_operand:V2DF 2 "register_operand" "v")))
+     (set (match_operand:V2DI 0 "register_operand" "=v")
+	  (gt:V2DI (match_dup 1) (match_dup 2)))])
+   (set (match_operand:SI 3 "memory_operand" "")
+	(unspec:SI [(reg:CCVH CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX")
+
+; Expander: V2DF greater-or-equal compare delivering both the V2DI
+; result mask (operand 0) and the condition code, stored as an int
+; into the memory at operand 3 via UNSPEC_CC_TO_INT.  The parallel is
+; matched by *vec_cmphev2df_cc below (vfchedbs).
+(define_expand "vec_cmphev2df_cc"
+  [(parallel
+    [(set (reg:CCVFHE CC_REGNUM)
+	  (compare:CCVFHE (match_operand:V2DF 1 "register_operand" "v")
+			  (match_operand:V2DF 2 "register_operand" "v")))
+     (set (match_operand:V2DI 0 "register_operand" "=v")
+	  (ge:V2DI (match_dup 1) (match_dup 2)))])
+   (set (match_operand:SI 3 "memory_operand" "")
+	(unspec:SI [(reg:CCVFHE CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX")
+
+
+; V2DF compare equal, CC-setting variant (vfcedbs): sets CC (mode
+; CCVEQ) from the element-wise compare of operands 0 and 1 and writes
+; the EQ result mask into operand 2.
+(define_insn "*vec_cmpeqv2df_cc"
+  [(set (reg:CCVEQ CC_REGNUM)
+	(compare:CCVEQ (match_operand:V2DF 0 "register_operand" "v")
+		       (match_operand:V2DF 1 "register_operand" "v")))
+   (set (match_operand:V2DI 2 "register_operand" "=v")
+	(eq:V2DI (match_dup 0) (match_dup 1)))]
+  "TARGET_VX"
+  "vfcedbs\t%v2,%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+; V2DF compare high (greater-than), CC-setting variant (vfchdbs): sets
+; CC (mode CCVH) from the element-wise compare and writes the GT
+; result mask into operand 2.
+(define_insn "*vec_cmphv2df_cc"
+  [(set (reg:CCVH CC_REGNUM)
+	(compare:CCVH (match_operand:V2DF 0 "register_operand" "v")
+		      (match_operand:V2DF 1 "register_operand" "v")))
+   (set (match_operand:V2DI 2 "register_operand" "=v")
+	(gt:V2DI (match_dup 0) (match_dup 1)))]
+  "TARGET_VX"
+  "vfchdbs\t%v2,%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+; V2DF compare high or equal (greater-or-equal), CC-setting variant
+; (vfchedbs): sets CC (mode CCVFHE) from the element-wise compare and
+; writes the GE result mask into operand 2.
+(define_insn "*vec_cmphev2df_cc"
+  [(set (reg:CCVFHE CC_REGNUM)
+	(compare:CCVFHE (match_operand:V2DF 0 "register_operand" "v")
+			(match_operand:V2DF 1 "register_operand" "v")))
+   (set (match_operand:V2DI 2 "register_operand" "=v")
+	(ge:V2DI (match_dup 0) (match_dup 1)))]
+  "TARGET_VX"
+  "vfchedbs\t%v2,%v0,%v1"
+  [(set_attr "op_type" "VRR")])