author     jakub <jakub@138bc75d-0d04-0410-961f-82ee72b054a4>  2014-01-04 09:57:36 +0000
committer  jakub <jakub@138bc75d-0d04-0410-961f-82ee72b054a4>  2014-01-04 09:57:36 +0000
commit     8688c545f63b6950c7be4a3a65144177e521b305 (patch)
tree       0825b32b591ed13bbe28597519cb1b2c610c122a /gcc/config/i386
parent     bd8f578b142037be0d8f60af0aea72e898ee7f73 (diff)
* config/i386/sse.md (avx512f_load<mode>_mask): Emit vmovup{s,d}
or vmovdqu* for misaligned_operand.
(<sse>_loadu<ssemodesuffix><avxsizesuffix><mask_name>,
<sse2_avx_avx512f>_loaddqu<mode><mask_name>): Handle <mask_applied>.
* config/i386/i386.c (ix86_expand_special_args_builtin): Set
aligned_mem for AVX512F masked aligned load and store builtins and
for non-temporal moves.

* gcc.target/i386/avx512f-vmovdqu32-1.c: Allow vmovdqu64 instead of
vmovdqu32.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@206332 138bc75d-0d04-0410-961f-82ee72b054a4
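For context, a hedged sketch of the builtins this classification change touches (my own example, not part of the commit or the testsuite; function names are illustrative): the AVX512F masked load/store intrinsics come in aligned and unaligned pairs, and only the aligned pair is now marked aligned_mem, so it keeps expanding with an alignment assumption (vmovaps/vmovdqa32 under mask) while the unaligned pair may use vmovups/vmovdqu*. Compile with -O2 -mavx512f and inspect the assembly to see the distinction.

#include <immintrin.h>

/* Hypothetical examples, not from the commit.  */

__m512 masked_load_aligned (const float *p, __m512 old, __mmask16 m)
{
  /* Aligned masked load builtin: p must be 64-byte aligned, so the
     expander may keep assuming aligned memory (masked vmovaps).  */
  return _mm512_mask_load_ps (old, m, p);
}

__m512 masked_load_unaligned (const float *p, __m512 old, __mmask16 m)
{
  /* Unaligned masked load: no alignment assumption, vmovups is the
     natural encoding.  */
  return _mm512_mask_loadu_ps (old, m, p);
}

void masked_store_aligned (float *p, __m512 v, __mmask16 m)
{
  /* Aligned masked store, one of the CODE_FOR_avx512f_storev*_mask
     cases marked aligned_mem in ix86_expand_special_args_builtin.  */
  _mm512_mask_store_ps (p, m, v);
}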
Diffstat (limited to 'gcc/config/i386')
-rw-r--r--  gcc/config/i386/i386.c  38
-rw-r--r--  gcc/config/i386/sse.md  26
2 files changed, 52 insertions(+), 12 deletions(-)
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index d2f5b6e9fda..1fc68e144bb 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -34407,6 +34407,9 @@ ix86_expand_special_args_builtin (const struct builtin_description *d,
case CODE_FOR_sse2_movntidi:
case CODE_FOR_sse_movntq:
case CODE_FOR_sse2_movntisi:
+ case CODE_FOR_avx512f_movntv16sf:
+ case CODE_FOR_avx512f_movntv8df:
+ case CODE_FOR_avx512f_movntv8di:
aligned_mem = true;
break;
default:
@@ -34431,6 +34434,24 @@ ix86_expand_special_args_builtin (const struct builtin_description *d,
klass = load;
memory = 0;
break;
+ case VOID_FTYPE_PV8DF_V8DF_QI:
+ case VOID_FTYPE_PV16SF_V16SF_HI:
+ case VOID_FTYPE_PV8DI_V8DI_QI:
+ case VOID_FTYPE_PV16SI_V16SI_HI:
+ switch (icode)
+ {
+ /* These builtins and instructions require the memory
+ to be properly aligned. */
+ case CODE_FOR_avx512f_storev16sf_mask:
+ case CODE_FOR_avx512f_storev16si_mask:
+ case CODE_FOR_avx512f_storev8df_mask:
+ case CODE_FOR_avx512f_storev8di_mask:
+ aligned_mem = true;
+ break;
+ default:
+ break;
+ }
+ /* FALLTHRU */
case VOID_FTYPE_PV8SF_V8SI_V8SF:
case VOID_FTYPE_PV4DF_V4DI_V4DF:
case VOID_FTYPE_PV4SF_V4SI_V4SF:
@@ -34439,10 +34460,6 @@ ix86_expand_special_args_builtin (const struct builtin_description *d,
case VOID_FTYPE_PV4DI_V4DI_V4DI:
case VOID_FTYPE_PV4SI_V4SI_V4SI:
case VOID_FTYPE_PV2DI_V2DI_V2DI:
- case VOID_FTYPE_PV8DF_V8DF_QI:
- case VOID_FTYPE_PV16SF_V16SF_HI:
- case VOID_FTYPE_PV8DI_V8DI_QI:
- case VOID_FTYPE_PV16SI_V16SI_HI:
case VOID_FTYPE_PDOUBLE_V2DF_QI:
case VOID_FTYPE_PFLOAT_V4SF_QI:
nargs = 2;
@@ -34459,6 +34476,19 @@ ix86_expand_special_args_builtin (const struct builtin_description *d,
nargs = 3;
klass = load;
memory = 0;
+ switch (icode)
+ {
+ /* These builtins and instructions require the memory
+ to be properly aligned. */
+ case CODE_FOR_avx512f_loadv16sf_mask:
+ case CODE_FOR_avx512f_loadv16si_mask:
+ case CODE_FOR_avx512f_loadv8df_mask:
+ case CODE_FOR_avx512f_loadv8di_mask:
+ aligned_mem = true;
+ break;
+ default:
+ break;
+ }
break;
case VOID_FTYPE_UINT_UINT_UINT:
case VOID_FTYPE_UINT64_UINT_UINT:
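The CODE_FOR_avx512f_movnt* cases added in the first hunk cover the AVX512F non-temporal store builtins, which likewise require an aligned memory operand. A minimal sketch of the intrinsics behind them (assumed example, not from the commit):

#include <immintrin.h>

/* Hypothetical examples: AVX512F non-temporal stores, whose destination
   must be 64-byte aligned (vmovntps / vmovntpd / vmovntdq).  */
void stream_ps (float *p, __m512 v)     { _mm512_stream_ps (p, v); }
void stream_pd (double *p, __m512d v)   { _mm512_stream_pd (p, v); }
void stream_epi64 (void *p, __m512i v)  { _mm512_stream_si512 (p, v); }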
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 405f9988d9b..dfc98ba813a 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -786,8 +786,12 @@
{
case MODE_V8DF:
case MODE_V16SF:
+ if (misaligned_operand (operands[1], <MODE>mode))
+ return "vmovu<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
return "vmova<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
default:
+ if (misaligned_operand (operands[1], <MODE>mode))
+ return "vmovdqu<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
return "vmovdqa<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
}
}
@@ -936,11 +940,14 @@
false, still emit UNSPEC_LOADU insn to honor user's request for
misaligned load. */
if (TARGET_AVX
- && misaligned_operand (operands[1], <MODE>mode)
- /* FIXME: Revisit after AVX512F merge is completed. */
- && !<mask_applied>)
+ && misaligned_operand (operands[1], <MODE>mode))
{
- emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ rtx src = operands[1];
+ if (<mask_applied>)
+ src = gen_rtx_VEC_MERGE (<MODE>mode, operands[1],
+ operands[2 * <mask_applied>],
+ operands[3 * <mask_applied>]);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], src));
DONE;
}
})
@@ -1046,11 +1053,14 @@
false, still emit UNSPEC_LOADU insn to honor user's request for
misaligned load. */
if (TARGET_AVX
- && misaligned_operand (operands[1], <MODE>mode)
- /* FIXME: Revisit after AVX512F merge is completed. */
- && !<mask_applied>)
+ && misaligned_operand (operands[1], <MODE>mode))
{
- emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ rtx src = operands[1];
+ if (<mask_applied>)
+ src = gen_rtx_VEC_MERGE (<MODE>mode, operands[1],
+ operands[2 * <mask_applied>],
+ operands[3 * <mask_applied>]);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], src));
DONE;
}
})
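On the sse.md side, the loadu/loaddqu expanders no longer bail out of the misaligned_operand shortcut when a mask is applied: the unaligned source is wrapped in a vec_merge with the merge value and the mask. A hedged illustration of a call that exercises this path (assumed example, not from the commit; the exact instruction selection still depends on later passes):

#include <immintrin.h>

/* Hypothetical example: a masked load through a pointer GCC cannot prove
   to be 64-byte aligned.  With this change the expander can emit the
   equivalent of
     (set (reg:V8DF dest)
          (vec_merge:V8DF (mem:V8DF addr)   ;; operands[1], misaligned source
                          (reg:V8DF old)    ;; operands[2], merge value
                          (reg:QI  mask)))  ;; operands[3], mask
   i.e. a single masked vmovupd, instead of refusing the shortcut as the
   pre-patch FIXME did.  */
__m512d masked_loadu (const double *p, __m512d old, __mmask8 m)
{
  return _mm512_mask_loadu_pd (old, m, p);
}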