author    Simon Pilgrim <llvm-dev@redking.me.uk>  2016-11-16 09:27:40 +0000
committer Simon Pilgrim <llvm-dev@redking.me.uk>  2016-11-16 09:27:40 +0000
commit    86ec28523592e3bf7dca7a0579cc18de3d49262b (patch)
tree      a1595a63f523054754523df86aabb4c459d04d80 /test/CodeGen/avx512vl-builtins.c
parent    4a9b9bf7798814aa9e32c1e4df0c1ccb65d992e7 (diff)
[X86][AVX512] Replace lossless i32/u32 to f64 conversion intrinsics with generic IR
Both the (V)CVTDQ2PD (i32 to f64) and (V)CVTUDQ2PD (u32 to f64) conversion instructions are lossless and can be safely represented as generic __builtin_convertvector calls instead of x86 intrinsics without affecting final codegen.

This patch removes the clang builtins and their use in the headers; a future patch will deal with removing the llvm intrinsics. This is an extension patch to D20528, which dealt with the equivalent sse/avx cases.

Differential Revision: https://reviews.llvm.org/D26686

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@287088 91177308-0d34-0410-b5e6-96231b3b80d8
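For reference, here is a minimal sketch of the generic pattern the headers switch to, assuming clang's __builtin_shufflevector and __builtin_convertvector builtins; the typedefs and function names below are illustrative, not the actual avx512vlintrin.h code. The shuffle keeps the low two i32 lanes, the convert lowers to a plain sitofp, and the masked form reduces to a per-lane select, matching the updated CHECK lines in the diff below.

/* Sketch only: the real headers use clang's internal __v4si/__v2df typedefs. */
typedef int    v4si __attribute__((__vector_size__(16)));
typedef double v2df __attribute__((__vector_size__(16)));

/* Unmasked: keep lanes 0 and 1, then convert i32 -> f64 (lossless). */
static inline v2df cvtepi32_pd_sketch(v4si a) {
  return __builtin_convertvector(__builtin_shufflevector(a, a, 0, 1), v2df);
}

/* Masked: blend converted lanes with the passthrough per mask bit,
   mirroring the 'select <2 x i1>' the new CHECK lines match. */
static inline v2df mask_cvtepi32_pd_sketch(v2df w, unsigned char u, v4si a) {
  v2df c = cvtepi32_pd_sketch(a);
  v2df r = { (u & 1) ? c[0] : w[0], (u & 2) ? c[1] : w[1] };
  return r;
}

The unsigned (V)CVTUDQ2PD variants follow the same shape with an unsigned element type, which lowers to uitofp instead of sitofp.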
Diffstat (limited to 'test/CodeGen/avx512vl-builtins.c')
-rw-r--r--  test/CodeGen/avx512vl-builtins.c  53
1 file changed, 33 insertions, 20 deletions
diff --git a/test/CodeGen/avx512vl-builtins.c b/test/CodeGen/avx512vl-builtins.c
index 01ff486874..c330357dee 100644
--- a/test/CodeGen/avx512vl-builtins.c
+++ b/test/CodeGen/avx512vl-builtins.c
@@ -1737,23 +1737,29 @@ void test_mm256_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m256i __A)
}
__m128d test_mm_mask_cvtepi32_pd(__m128d __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_cvtepi32_pd
- // CHECK: @llvm.x86.avx512.mask.cvtdq2pd.128
- return _mm_mask_cvtepi32_pd(__W,__U,__A);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
+ // CHECK: sitofp <2 x i32> %{{.*}} to <2 x double>
+ // CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
+ return _mm_mask_cvtepi32_pd(__W,__U,__A);
}
__m128d test_mm_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_maskz_cvtepi32_pd
- // CHECK: @llvm.x86.avx512.mask.cvtdq2pd.128
- return _mm_maskz_cvtepi32_pd(__U,__A);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
+ // CHECK: sitofp <2 x i32> %{{.*}} to <2 x double>
+ // CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
+ return _mm_maskz_cvtepi32_pd(__U,__A);
}
__m256d test_mm256_mask_cvtepi32_pd(__m256d __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm256_mask_cvtepi32_pd
- // CHECK: @llvm.x86.avx512.mask.cvtdq2pd.256
- return _mm256_mask_cvtepi32_pd(__W,__U,__A);
+ // CHECK: sitofp <4 x i32> %{{.*}} to <4 x double>
+ // CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
+ return _mm256_mask_cvtepi32_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm256_maskz_cvtepi32_pd
- // CHECK: @llvm.x86.avx512.mask.cvtdq2pd.256
- return _mm256_maskz_cvtepi32_pd(__U,__A);
+ // CHECK: sitofp <4 x i32> %{{.*}} to <4 x double>
+ // CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
+ return _mm256_maskz_cvtepi32_pd(__U,__A);
}
__m128 test_mm_mask_cvtepi32_ps(__m128 __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_cvtepi32_ps
@@ -2017,33 +2023,40 @@ __m256i test_mm256_maskz_cvttps_epu32(__mmask8 __U, __m256 __A) {
}
__m128d test_mm_cvtepu32_pd(__m128i __A) {
// CHECK-LABEL: @test_mm_cvtepu32_pd
- // CHECK: @llvm.x86.avx512.mask.cvtudq2pd.128
- return _mm_cvtepu32_pd(__A);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
+ // CHECK: uitofp <2 x i32> %{{.*}} to <2 x double>
+ return _mm_cvtepu32_pd(__A);
}
__m128d test_mm_mask_cvtepu32_pd(__m128d __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_cvtepu32_pd
- // CHECK: @llvm.x86.avx512.mask.cvtudq2pd.128
- return _mm_mask_cvtepu32_pd(__W,__U,__A);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
+ // CHECK: uitofp <2 x i32> %{{.*}} to <2 x double>
+ // CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
+ return _mm_mask_cvtepu32_pd(__W,__U,__A);
}
__m128d test_mm_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_maskz_cvtepu32_pd
- // CHECK: @llvm.x86.avx512.mask.cvtudq2pd.128
- return _mm_maskz_cvtepu32_pd(__U,__A);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
+ // CHECK: uitofp <2 x i32> %{{.*}} to <2 x double>
+ // CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
+ return _mm_maskz_cvtepu32_pd(__U,__A);
}
__m256d test_mm256_cvtepu32_pd(__m128i __A) {
// CHECK-LABEL: @test_mm256_cvtepu32_pd
- // CHECK: @llvm.x86.avx512.mask.cvtudq2pd.256
- return _mm256_cvtepu32_pd(__A);
+ // CHECK: uitofp <4 x i32> %{{.*}} to <4 x double>
+ return _mm256_cvtepu32_pd(__A);
}
__m256d test_mm256_mask_cvtepu32_pd(__m256d __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm256_mask_cvtepu32_pd
- // CHECK: @llvm.x86.avx512.mask.cvtudq2pd.256
- return _mm256_mask_cvtepu32_pd(__W,__U,__A);
+ // CHECK: uitofp <4 x i32> %{{.*}} to <4 x double>
+ // CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
+ return _mm256_mask_cvtepu32_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm256_maskz_cvtepu32_pd
- // CHECK: @llvm.x86.avx512.mask.cvtudq2pd.256
- return _mm256_maskz_cvtepu32_pd(__U,__A);
+ // CHECK: uitofp <4 x i32> %{{.*}} to <4 x double>
+ // CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
+ return _mm256_maskz_cvtepu32_pd(__U,__A);
}
__m128 test_mm_cvtepu32_ps(__m128i __A) {
// CHECK-LABEL: @test_mm_cvtepu32_ps