-rw-r--r-- | gcc/ChangeLog                                 |  13
-rw-r--r-- | gcc/config/spu/spu.md                         | 470
-rw-r--r-- | gcc/testsuite/ChangeLog                       |  11
-rw-r--r-- | gcc/testsuite/gcc.dg/vect/fast-math-pr35982.c |   1
-rw-r--r-- | gcc/testsuite/gcc.dg/vect/slp-11.c            |   3
-rw-r--r-- | gcc/testsuite/gcc.dg/vect/slp-18.c            |   1
-rw-r--r-- | gcc/testsuite/lib/target-supports.exp         |  12
7 files changed, 506 insertions, 5 deletions
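
The patch implements GCC's even/odd extraction, interleave, and pack-truncate named vector patterns for SPU, expanding each one to a single shufb driven by a constant byte-permute mask, and updates the vectorizer testsuite accordingly. As a rough reference for what the merge-style patterns compute, here is a scalar model on 4-element vectors (an illustrative sketch, not part of the patch; the helper names are made up, and element 0 is the leftmost element of an SPU register):

#include <stdio.h>

/* Reference semantics of the new extract/interleave patterns on
   4-element int vectors: each pattern merges two inputs a and b.  */
static void extract_even (const int *a, const int *b, int *r)
{
  r[0] = a[0]; r[1] = a[2]; r[2] = b[0]; r[3] = b[2];
}

static void extract_odd (const int *a, const int *b, int *r)
{
  r[0] = a[1]; r[1] = a[3]; r[2] = b[1]; r[3] = b[3];
}

/* "high" means the lowest-numbered elements, which come first in the
   SPU's big-endian register layout.  */
static void interleave_high (const int *a, const int *b, int *r)
{
  r[0] = a[0]; r[1] = b[0]; r[2] = a[1]; r[3] = b[1];
}

static void interleave_low (const int *a, const int *b, int *r)
{
  r[0] = a[2]; r[1] = b[2]; r[2] = a[3]; r[3] = b[3];
}

int main (void)
{
  int a[4] = {0, 1, 2, 3}, b[4] = {4, 5, 6, 7}, r[4];
  extract_even (a, b, r);
  printf ("even: %d %d %d %d\n", r[0], r[1], r[2], r[3]);  /* 0 2 4 6 */
  extract_odd (a, b, r);
  printf ("odd:  %d %d %d %d\n", r[0], r[1], r[2], r[3]);  /* 1 3 5 7 */
  interleave_high (a, b, r);
  printf ("high: %d %d %d %d\n", r[0], r[1], r[2], r[3]);  /* 0 4 1 5 */
  interleave_low (a, b, r);
  printf ("low:  %d %d %d %d\n", r[0], r[1], r[2], r[3]);  /* 2 6 3 7 */
  return 0;
}

vec_pack_trunc_v8hi/v4si narrow in the same one-shufb style, keeping only the low (least significant) half of each wider element of both operands.
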
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 7db55b34106..7aae9b1696d 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,16 @@
+2008-08-26  Victor Kaplansky  <victork@il.ibm.com>
+	    Dorit Nuzman  <dorit@il.ibm.com>
+
+	* gcc/config/spu/spu.md (vec_extract_evenv4si,
+	vec_extract_evenv4sf, vec_extract_evenv8hi,
+	vec_extract_evenv16qi, vec_extract_oddv4si,
+	vec_extract_oddv4sf, vec_extract_oddv8hi, vec_extract_oddv16qi,
+	vec_interleave_highv4sf, vec_interleave_lowv4sf,
+	vec_interleave_highv4si, vec_interleave_lowv4si,
+	vec_interleave_highv8hi, vec_interleave_lowv8hi,
+	vec_interleave_highv16qi, vec_interleave_lowv16qi,
+	vec_pack_trunc_v8hi, vec_pack_trunc_v4si): Implement.
+
 2008-08-25  Janis Johnson  <janis187@us.ibm.com>
 
 	PR target/36756
diff --git a/gcc/config/spu/spu.md b/gcc/config/spu/spu.md
index ffe46f53179..89f2109ceb3 100644
--- a/gcc/config/spu/spu.md
+++ b/gcc/config/spu/spu.md
@@ -4757,6 +4757,476 @@ DONE;
   DONE;
 })
+
+(define_expand "vec_extract_evenv4si"
+  [(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
+        (vec_concat:V4SI
+          (vec_select:V2SI
+            (match_operand:V4SI 1 "spu_reg_operand" "r")
+            (parallel [(const_int 0)(const_int 2)]))
+          (vec_select:V2SI
+            (match_operand:V4SI 2 "spu_reg_operand" "r")
+            (parallel [(const_int 0)(const_int 2)]))))]
+
+  ""
+  "
+{
+  rtx mask = gen_reg_rtx (TImode);
+  unsigned char arr[16] = {
+    0x00, 0x01, 0x02, 0x03,
+    0x08, 0x09, 0x0A, 0x0B,
+    0x10, 0x11, 0x12, 0x13,
+    0x18, 0x19, 0x1A, 0x1B};
+
+  emit_move_insn (mask, array_to_constant (TImode, arr));
+  emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask));
+  DONE;
+}")
+
+(define_expand "vec_extract_evenv4sf"
+  [(set (match_operand:V4SF 0 "spu_reg_operand" "=r")
+        (vec_concat:V4SF
+          (vec_select:V2SF
+            (match_operand:V4SF 1 "spu_reg_operand" "r")
+            (parallel [(const_int 0)(const_int 2)]))
+          (vec_select:V2SF
+            (match_operand:V4SF 2 "spu_reg_operand" "r")
+            (parallel [(const_int 0)(const_int 2)]))))]
+
+  ""
+  "
+{
+  rtx mask = gen_reg_rtx (TImode);
+  unsigned char arr[16] = {
+    0x00, 0x01, 0x02, 0x03,
+    0x08, 0x09, 0x0A, 0x0B,
+    0x10, 0x11, 0x12, 0x13,
+    0x18, 0x19, 0x1A, 0x1B};
+
+  emit_move_insn (mask, array_to_constant (TImode, arr));
+  emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask));
+  DONE;
+}")
+
+(define_expand "vec_extract_evenv8hi"
+  [(set (match_operand:V8HI 0 "spu_reg_operand" "=r")
+        (vec_concat:V8HI
+          (vec_select:V4HI
+            (match_operand:V8HI 1 "spu_reg_operand" "r")
+            (parallel [(const_int 0)(const_int 2)(const_int 4)(const_int 6)]))
+          (vec_select:V4HI
+            (match_operand:V8HI 2 "spu_reg_operand" "r")
+            (parallel [(const_int 0)(const_int 2)(const_int 4)(const_int 6)]))))]
+
+  ""
+  "
+{
+  rtx mask = gen_reg_rtx (TImode);
+  unsigned char arr[16] = {
+    0x00, 0x01, 0x04, 0x05,
+    0x08, 0x09, 0x0C, 0x0D,
+    0x10, 0x11, 0x14, 0x15,
+    0x18, 0x19, 0x1C, 0x1D};
+
+  emit_move_insn (mask, array_to_constant (TImode, arr));
+  emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask));
+  DONE;
+}")
+
+(define_expand "vec_extract_evenv16qi"
+  [(set (match_operand:V16QI 0 "spu_reg_operand" "=r")
+        (vec_concat:V16QI
+          (vec_select:V8QI
+            (match_operand:V16QI 1 "spu_reg_operand" "r")
+            (parallel [(const_int 0)(const_int 2)(const_int 4)(const_int 6)
+                       (const_int 8)(const_int 10)(const_int 12)(const_int 14)]))
+          (vec_select:V8QI
+            (match_operand:V16QI 2 "spu_reg_operand" "r")
+            (parallel [(const_int 0)(const_int 2)(const_int 4)(const_int 6)
+                       (const_int 8)(const_int 10)(const_int 12)(const_int 14)]))))]
+
+  ""
+  "
+{
+  rtx mask = gen_reg_rtx (TImode);
+  unsigned char arr[16] = {
+    0x00, 0x02, 0x04, 0x06,
+    0x08, 0x0A, 0x0C, 0x0E,
+    0x10, 0x12, 0x14, 0x16,
+    0x18, 0x1A, 0x1C, 0x1E};
+
+  emit_move_insn (mask, array_to_constant (TImode, arr));
+  emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask));
+  DONE;
+}")
+
+(define_expand "vec_extract_oddv4si"
+  [(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
+        (vec_concat:V4SI
+          (vec_select:V2SI
+            (match_operand:V4SI 1 "spu_reg_operand" "r")
+            (parallel [(const_int 1)(const_int 3)]))
+          (vec_select:V2SI
+            (match_operand:V4SI 2 "spu_reg_operand" "r")
+            (parallel [(const_int 1)(const_int 3)]))))]
+
+  ""
+  "
+{
+  rtx mask = gen_reg_rtx (TImode);
+  unsigned char arr[16] = {
+    0x04, 0x05, 0x06, 0x07,
+    0x0C, 0x0D, 0x0E, 0x0F,
+    0x14, 0x15, 0x16, 0x17,
+    0x1C, 0x1D, 0x1E, 0x1F};
+
+  emit_move_insn (mask, array_to_constant (TImode, arr));
+  emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask));
+  DONE;
+}")
+
+(define_expand "vec_extract_oddv4sf"
+  [(set (match_operand:V4SF 0 "spu_reg_operand" "=r")
+        (vec_concat:V4SF
+          (vec_select:V2SF
+            (match_operand:V4SF 1 "spu_reg_operand" "r")
+            (parallel [(const_int 1)(const_int 3)]))
+          (vec_select:V2SF
+            (match_operand:V4SF 2 "spu_reg_operand" "r")
+            (parallel [(const_int 1)(const_int 3)]))))]
+
+  ""
+  "
+{
+  rtx mask = gen_reg_rtx (TImode);
+  unsigned char arr[16] = {
+    0x04, 0x05, 0x06, 0x07,
+    0x0C, 0x0D, 0x0E, 0x0F,
+    0x14, 0x15, 0x16, 0x17,
+    0x1C, 0x1D, 0x1E, 0x1F};
+
+  emit_move_insn (mask, array_to_constant (TImode, arr));
+  emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask));
+  DONE;
+}")
+
+(define_expand "vec_extract_oddv8hi"
+  [(set (match_operand:V8HI 0 "spu_reg_operand" "=r")
+        (vec_concat:V8HI
+          (vec_select:V4HI
+            (match_operand:V8HI 1 "spu_reg_operand" "r")
+            (parallel [(const_int 1)(const_int 3)(const_int 5)(const_int 7)]))
+          (vec_select:V4HI
+            (match_operand:V8HI 2 "spu_reg_operand" "r")
+            (parallel [(const_int 1)(const_int 3)(const_int 5)(const_int 7)]))))]
+
+  ""
+  "
+{
+  rtx mask = gen_reg_rtx (TImode);
+  unsigned char arr[16] = {
+    0x02, 0x03, 0x06, 0x07,
+    0x0A, 0x0B, 0x0E, 0x0F,
+    0x12, 0x13, 0x16, 0x17,
+    0x1A, 0x1B, 0x1E, 0x1F};
+
+  emit_move_insn (mask, array_to_constant (TImode, arr));
+  emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask));
+  DONE;
+}")
+
+(define_expand "vec_extract_oddv16qi"
+  [(set (match_operand:V16QI 0 "spu_reg_operand" "=r")
+        (vec_concat:V16QI
+          (vec_select:V8QI
+            (match_operand:V16QI 1 "spu_reg_operand" "r")
+            (parallel [(const_int 1)(const_int 3)(const_int 5)(const_int 7)
+                       (const_int 9)(const_int 11)(const_int 13)(const_int 15)]))
+          (vec_select:V8QI
+            (match_operand:V16QI 2 "spu_reg_operand" "r")
+            (parallel [(const_int 1)(const_int 3)(const_int 5)(const_int 7)
+                       (const_int 9)(const_int 11)(const_int 13)(const_int 15)]))))]
+
+  ""
+  "
+{
+  rtx mask = gen_reg_rtx (TImode);
+  unsigned char arr[16] = {
+    0x01, 0x03, 0x05, 0x07,
+    0x09, 0x0B, 0x0D, 0x0F,
+    0x11, 0x13, 0x15, 0x17,
+    0x19, 0x1B, 0x1D, 0x1F};
+
+  emit_move_insn (mask, array_to_constant (TImode, arr));
+  emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask));
+  DONE;
+}")
+
+(define_expand "vec_interleave_highv4sf"
+  [(set (match_operand:V4SF 0 "spu_reg_operand" "=r")
+        (vec_select:V4SF
+          (vec_concat:V4SF
+            (vec_select:V2SF
+              (match_operand:V4SF 1 "spu_reg_operand" "r")
+              (parallel [(const_int 0)(const_int 1)]))
+            (vec_select:V2SF
+              (match_operand:V4SF 2 "spu_reg_operand" "r")
+              (parallel [(const_int 0)(const_int 1)])))
+          (parallel [(const_int 0)(const_int 2)(const_int 1)(const_int 3)])))]
+
+  ""
+  "
+{
+  rtx mask = gen_reg_rtx (TImode);
+  unsigned char arr[16] = {
+    0x00, 0x01, 0x02, 0x03,
+    0x10, 0x11, 0x12, 0x13,
+    0x04, 0x05, 0x06, 0x07,
+    0x14, 0x15, 0x16, 0x17};
+
+  emit_move_insn (mask, array_to_constant (TImode, arr));
+  emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask));
+  DONE;
+}")
+(define_expand "vec_interleave_lowv4sf"
+  [(set (match_operand:V4SF 0 "spu_reg_operand" "=r")
+        (vec_select:V4SF
+          (vec_concat:V4SF
+            (vec_select:V2SF
+              (match_operand:V4SF 1 "spu_reg_operand" "r")
+              (parallel [(const_int 2)(const_int 3)]))
+            (vec_select:V2SF
+              (match_operand:V4SF 2 "spu_reg_operand" "r")
+              (parallel [(const_int 2)(const_int 3)])))
+          (parallel [(const_int 0)(const_int 2)(const_int 1)(const_int 3)])))]
+
+  ""
+  "
+{
+  rtx mask = gen_reg_rtx (TImode);
+  unsigned char arr[16] = {
+    0x08, 0x09, 0x0A, 0x0B,
+    0x18, 0x19, 0x1A, 0x1B,
+    0x0C, 0x0D, 0x0E, 0x0F,
+    0x1C, 0x1D, 0x1E, 0x1F};
+
+  emit_move_insn (mask, array_to_constant (TImode, arr));
+  emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask));
+  DONE;
+}")
+
+(define_expand "vec_interleave_highv4si"
+  [(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
+        (vec_select:V4SI
+          (vec_concat:V4SI
+            (vec_select:V2SI
+              (match_operand:V4SI 1 "spu_reg_operand" "r")
+              (parallel [(const_int 0)(const_int 1)]))
+            (vec_select:V2SI
+              (match_operand:V4SI 2 "spu_reg_operand" "r")
+              (parallel [(const_int 0)(const_int 1)])))
+          (parallel [(const_int 0)(const_int 2)(const_int 1)(const_int 3)])))]
+
+  ""
+  "
+{
+  rtx mask = gen_reg_rtx (TImode);
+  unsigned char arr[16] = {
+    0x00, 0x01, 0x02, 0x03,
+    0x10, 0x11, 0x12, 0x13,
+    0x04, 0x05, 0x06, 0x07,
+    0x14, 0x15, 0x16, 0x17};
+
+  emit_move_insn (mask, array_to_constant (TImode, arr));
+  emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask));
+  DONE;
+}")
+
+(define_expand "vec_interleave_lowv4si"
+  [(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
+        (vec_select:V4SI
+          (vec_concat:V4SI
+            (vec_select:V2SI
+              (match_operand:V4SI 1 "spu_reg_operand" "r")
+              (parallel [(const_int 2)(const_int 3)]))
+            (vec_select:V2SI
+              (match_operand:V4SI 2 "spu_reg_operand" "r")
+              (parallel [(const_int 2)(const_int 3)])))
+          (parallel [(const_int 0)(const_int 2)(const_int 1)(const_int 3)])))]
+
+  ""
+  "
+{
+  rtx mask = gen_reg_rtx (TImode);
+  unsigned char arr[16] = {
+    0x08, 0x09, 0x0A, 0x0B,
+    0x18, 0x19, 0x1A, 0x1B,
+    0x0C, 0x0D, 0x0E, 0x0F,
+    0x1C, 0x1D, 0x1E, 0x1F};
+
+  emit_move_insn (mask, array_to_constant (TImode, arr));
+  emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask));
+  DONE;
+}")
+
+(define_expand "vec_interleave_highv8hi"
+  [(set (match_operand:V8HI 0 "spu_reg_operand" "=r")
+        (vec_select:V8HI
+          (vec_concat:V8HI
+            (vec_select:V4HI
+              (match_operand:V8HI 1 "spu_reg_operand" "r")
+              (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)]))
+            (vec_select:V4HI
+              (match_operand:V8HI 2 "spu_reg_operand" "r")
+              (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)])))
+          (parallel [(const_int 0)(const_int 4)(const_int 1)(const_int 5)
+                     (const_int 2)(const_int 6)(const_int 3)(const_int 7)])))]
+
+  ""
+  "
+{
+  rtx mask = gen_reg_rtx (TImode);
+  unsigned char arr[16] = {
+    0x00, 0x01, 0x10, 0x11,
+    0x02, 0x03, 0x12, 0x13,
+    0x04, 0x05, 0x14, 0x15,
+    0x06, 0x07, 0x16, 0x17};
+
+  emit_move_insn (mask, array_to_constant (TImode, arr));
+  emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask));
+  DONE;
+}")
+
+(define_expand "vec_interleave_lowv8hi"
+  [(set (match_operand:V8HI 0 "spu_reg_operand" "=r")
"=r") + (vec_select:V8HI + (vec_concat:V8HI + (vec_select:V4HI + (match_operand:V8HI 1 "spu_reg_operand" "r") + (parallel [(const_int 4)(const_int 5)(const_int 6)(const_int 7)])) + (vec_select:V4HI + (match_operand:V8HI 2 "spu_reg_operand" "r") + (parallel [(const_int 4)(const_int 5)(const_int 6)(const_int 7)]))) + (parallel [(const_int 0)(const_int 4)(const_int 1)(const_int 5) + (const_int 2)(const_int 6)(const_int 3)(const_int 7)])))] + + "" + " +{ + rtx mask = gen_reg_rtx (TImode); + unsigned char arr[16] = { + 0x08, 0x09, 0x18, 0x19, + 0x0A, 0x0B, 0x1A, 0x1B, + 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F}; + + emit_move_insn (mask, array_to_constant (TImode, arr)); + emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask)); + DONE; +}") + +(define_expand "vec_interleave_highv16qi" + [(set (match_operand:V16QI 0 "spu_reg_operand" "=r") + (vec_select:V16QI + (vec_concat:V16QI + (vec_select:V8QI + (match_operand:V16QI 1 "spu_reg_operand" "r") + (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3) + (const_int 4)(const_int 5)(const_int 6)(const_int 7)])) + (vec_select:V8QI + (match_operand:V16QI 2 "spu_reg_operand" "r") + (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3) + (const_int 4)(const_int 5)(const_int 6)(const_int 7)]))) + (parallel [(const_int 0)(const_int 8)(const_int 1)(const_int 9) + (const_int 2)(const_int 10)(const_int 3)(const_int 11) + (const_int 4)(const_int 12)(const_int 5)(const_int 13) + (const_int 6)(const_int 14)(const_int 7)(const_int 15)])))] + + "" + " +{ + rtx mask = gen_reg_rtx (TImode); + unsigned char arr[16] = { + 0x00, 0x10, 0x01, 0x11, + 0x02, 0x12, 0x03, 0x13, + 0x04, 0x14, 0x05, 0x15, + 0x06, 0x16, 0x07, 0x17}; + + emit_move_insn (mask, array_to_constant (TImode, arr)); + emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask)); + DONE; +}") + +(define_expand "vec_interleave_lowv16qi" + [(set (match_operand:V16QI 0 "spu_reg_operand" "=r") + (vec_select:V16QI + (vec_concat:V16QI + (vec_select:V8QI + (match_operand:V16QI 1 "spu_reg_operand" "r") + (parallel [(const_int 8)(const_int 9)(const_int 10)(const_int 11) + (const_int 12)(const_int 13)(const_int 14)(const_int 15)])) + (vec_select:V8QI + (match_operand:V16QI 2 "spu_reg_operand" "r") + (parallel [(const_int 8)(const_int 9)(const_int 10)(const_int 11) + (const_int 12)(const_int 13)(const_int 14)(const_int 15)]))) + (parallel [(const_int 0)(const_int 8)(const_int 1)(const_int 9) + (const_int 2)(const_int 10)(const_int 3)(const_int 11) + (const_int 4)(const_int 12)(const_int 5)(const_int 13) + (const_int 6)(const_int 14)(const_int 7)(const_int 15)])))] + + "" + " +{ + rtx mask = gen_reg_rtx (TImode); + unsigned char arr[16] = { + 0x08, 0x18, 0x09, 0x19, + 0x0A, 0x1A, 0x0B, 0x1B, + 0x0C, 0x1C, 0x0D, 0x1D, + 0x0E, 0x1E, 0x0F, 0x1F}; + + emit_move_insn (mask, array_to_constant (TImode, arr)); + emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask)); + DONE; +}") + +(define_expand "vec_pack_trunc_v8hi" + [(set (match_operand:V16QI 0 "spu_reg_operand" "=r") + (vec_concat:V16QI + (truncate:V8QI (match_operand:V8HI 1 "spu_reg_operand" "r")) + (truncate:V8QI (match_operand:V8HI 2 "spu_reg_operand" "r"))))] + "" + " +{ + rtx mask = gen_reg_rtx (TImode); + unsigned char arr[16] = { + 0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F}; + + emit_move_insn (mask, array_to_constant (TImode, arr)); + emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask)); + + DONE; +}") + +(define_expand 
"vec_pack_trunc_v4si" + [(set (match_operand:V8HI 0 "spu_reg_operand" "=r") + (vec_concat:V8HI + (truncate:V4HI (match_operand:V4SI 1 "spu_reg_operand" "r")) + (truncate:V4HI (match_operand:V4SI 2 "spu_reg_operand" "r"))))] + "" + " +{ + rtx mask = gen_reg_rtx (TImode); + unsigned char arr[16] = { + 0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F}; + + emit_move_insn (mask, array_to_constant (TImode, arr)); + emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask)); + + DONE; +}") diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index f2202f4702c..2df0ac8a71b 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -1,3 +1,14 @@ +2008-08-26 Victor Kaplansky <victork@il.ibm.com> + + * gcc.dg/vect/slp-18.c: Require vect_intfloat_cvt. + * gcc.dg/vect/slp-11.c: Likewise. + * gcc.dg/vect/fast-math-pr35982.c: Likewise. + * lib/target-supports.exp: + (check_effective_target_vect_pack_trunc): Add SPU to the list. + (check_effective_target_vect_extract_even_odd): Likewise. + (check_effective_target_vect_extract_even_odd_wide): Likewise. + (check_effective_target_vect_interleave): Likewise. + 2008-08-25 Ulrich Weigand <Ulrich.Weigand@de.ibm.com> Andrew Pinski <andrew_pinski@playstation.sony.com> diff --git a/gcc/testsuite/gcc.dg/vect/fast-math-pr35982.c b/gcc/testsuite/gcc.dg/vect/fast-math-pr35982.c index 2c788606771..6a01782bb3d 100644 --- a/gcc/testsuite/gcc.dg/vect/fast-math-pr35982.c +++ b/gcc/testsuite/gcc.dg/vect/fast-math-pr35982.c @@ -1,6 +1,7 @@ /* { dg-do compile } */ /* { dg-require-effective-target vect_float } */ /* { dg-require-effective-target vect_int } */ +/* { dg-require-effective-target vect_intfloat_cvt } */ struct mem { diff --git a/gcc/testsuite/gcc.dg/vect/slp-11.c b/gcc/testsuite/gcc.dg/vect/slp-11.c index d606438fd20..1e87eef4344 100644 --- a/gcc/testsuite/gcc.dg/vect/slp-11.c +++ b/gcc/testsuite/gcc.dg/vect/slp-11.c @@ -106,7 +106,8 @@ int main (void) return 0; } -/* { dg-final { scan-tree-dump-times "vectorized 3 loops" 1 "vect" { target { vect_strided_wide && vect_int_mult } } } } */ +/* { dg-final { scan-tree-dump-times "vectorized 3 loops" 1 "vect" { target { { vect_intfloat_cvt && vect_strided_wide } && vect_int_mult } } } } */ +/* { dg-final { scan-tree-dump-times "vectorized 2 loops" 1 "vect" { target { { { ! vect_intfloat_cvt } && vect_strided_wide } && vect_int_mult } } } } */ /* { dg-final { scan-tree-dump-times "vectorized 0 loops" 1 "vect" {target { ! 
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index f2202f4702c..2df0ac8a71b 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,14 @@
+2008-08-26  Victor Kaplansky  <victork@il.ibm.com>
+
+	* gcc.dg/vect/slp-18.c: Require vect_intfloat_cvt.
+	* gcc.dg/vect/slp-11.c: Likewise.
+	* gcc.dg/vect/fast-math-pr35982.c: Likewise.
+	* lib/target-supports.exp:
+	(check_effective_target_vect_pack_trunc): Add SPU to the list.
+	(check_effective_target_vect_extract_even_odd): Likewise.
+	(check_effective_target_vect_extract_even_odd_wide): Likewise.
+	(check_effective_target_vect_interleave): Likewise.
+
 2008-08-25  Ulrich Weigand  <Ulrich.Weigand@de.ibm.com>
 	    Andrew Pinski  <andrew_pinski@playstation.sony.com>
 
diff --git a/gcc/testsuite/gcc.dg/vect/fast-math-pr35982.c b/gcc/testsuite/gcc.dg/vect/fast-math-pr35982.c
index 2c788606771..6a01782bb3d 100644
--- a/gcc/testsuite/gcc.dg/vect/fast-math-pr35982.c
+++ b/gcc/testsuite/gcc.dg/vect/fast-math-pr35982.c
@@ -1,6 +1,7 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target vect_float } */
 /* { dg-require-effective-target vect_int } */
+/* { dg-require-effective-target vect_intfloat_cvt } */
 
 struct mem
 {
diff --git a/gcc/testsuite/gcc.dg/vect/slp-11.c b/gcc/testsuite/gcc.dg/vect/slp-11.c
index d606438fd20..1e87eef4344 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-11.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-11.c
@@ -106,7 +106,8 @@ int main (void)
   return 0;
 }
 
-/* { dg-final { scan-tree-dump-times "vectorized 3 loops" 1 "vect" { target { vect_strided_wide && vect_int_mult } } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 3 loops" 1 "vect" { target { { vect_intfloat_cvt && vect_strided_wide } && vect_int_mult } } } } */
+/* { dg-final { scan-tree-dump-times "vectorized 2 loops" 1 "vect" { target { { { ! vect_intfloat_cvt } && vect_strided_wide } && vect_int_mult } } } } */
 /* { dg-final { scan-tree-dump-times "vectorized 0 loops" 1 "vect" {target { ! { vect_int_mult && vect_strided_wide } } } } } */
 /* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 0 "vect" } } */
 /* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-18.c b/gcc/testsuite/gcc.dg/vect/slp-18.c
index b8e122c6cfa..91e1e114c1b 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-18.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-18.c
@@ -1,4 +1,5 @@
 /* { dg-require-effective-target vect_int } */
+/* { dg-require-effective-target vect_intfloat_cvt } */
 
 #include <stdarg.h>
 #include <stdio.h>
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index d525a5bf5a1..69c8ea438c6 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -1830,7 +1830,8 @@ proc check_effective_target_vect_pack_trunc { } {
 	set et_vect_pack_trunc_saved 0
 	if { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*])
 	     || [istarget i?86-*-*]
-	     || [istarget x86_64-*-*] } {
+	     || [istarget x86_64-*-*]
+	     || [istarget spu-*-*] } {
 	    set et_vect_pack_trunc_saved 1
 	}
     }
@@ -2101,7 +2102,8 @@ proc check_effective_target_vect_extract_even_odd { } {
 	verbose "check_effective_target_vect_extract_even_odd: using cached result" 2
     } else {
 	set et_vect_extract_even_odd_saved 0
-	if { [istarget powerpc*-*-*] } {
+	if { [istarget powerpc*-*-*]
+	     || [istarget spu-*-*] } {
 	    set et_vect_extract_even_odd_saved 1
 	}
     }
@@ -2122,7 +2124,8 @@ proc check_effective_target_vect_extract_even_odd_wide { } {
 	set et_vect_extract_even_odd_wide_saved 0
 	if { [istarget powerpc*-*-*]
 	     || [istarget i?86-*-*]
-	     || [istarget x86_64-*-*] } {
+	     || [istarget x86_64-*-*]
+	     || [istarget spu-*-*] } {
 	    set et_vect_extract_even_odd_wide_saved 1
 	}
     }
@@ -2142,7 +2145,8 @@ proc check_effective_target_vect_interleave { } {
 	set et_vect_interleave_saved 0
 	if { [istarget powerpc*-*-*]
 	     || [istarget i?86-*-*]
-	     || [istarget x86_64-*-*] } {
+	     || [istarget x86_64-*-*]
+	     || [istarget spu-*-*] } {
 	    set et_vect_interleave_saved 1
 	}
     }
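
On the testsuite side, adding spu-*-* to these effective-target checks simply tells the vectorizer tests that SPU can now vectorize interleaved (stride-two) accesses and narrowing operations. The kind of loop those keywords gate looks like this (an illustrative sketch, not one of the testcases touched above; the function name and N are made up):

#define N 64

/* Stride-two reads: the vectorizer loads two full vectors per block of
   iterations and separates them with extract-even / extract-odd, each
   of which is now a single shufb on SPU.  */
void
split (const int *__restrict in, int *__restrict even_out,
       int *__restrict odd_out)
{
  int i;
  for (i = 0; i < N; i++)
    {
      even_out[i] = in[2 * i];      /* even elements */
      odd_out[i]  = in[2 * i + 1];  /* odd elements  */
    }
}

In a gcc.dg/vect test, a loop like this would typically sit behind /* { dg-require-effective-target vect_extract_even_odd } */ or use the corresponding target selector in a dg-final scan, which is exactly what the target-supports.exp changes above enable for spu-*-*.
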