Diffstat (limited to 'llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll')
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll  92
1 file changed, 37 insertions, 55 deletions
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
index bd2dac949302..eb21c54c18e9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN:   -target-abi=lp64d < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -424,9 +424,8 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.f16(
 define <vscale x 1 x float> @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v9
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.f16(
@@ -448,9 +447,8 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.f16(
 define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.f16(
@@ -472,9 +470,8 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.f16(
 define <vscale x 2 x float> @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v9
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.f16(
@@ -496,9 +493,8 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.f16(
 define <vscale x 2 x float> @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.f16(
@@ -520,9 +516,8 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.f16(
 define <vscale x 4 x float> @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.f16(
@@ -544,9 +539,8 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.f16(
 define <vscale x 4 x float> @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.f16(
@@ -568,9 +562,8 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.f16(
 define <vscale x 8 x float> @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.f16(
@@ -592,9 +585,8 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.f16(
 define <vscale x 8 x float> @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.f16(
@@ -616,9 +608,8 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.f16(
 define <vscale x 16 x float> @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v16
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.f16(
@@ -640,9 +631,8 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.f16(
 define <vscale x 16 x float> @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.f16(
@@ -664,9 +654,8 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32(
 define <vscale x 1 x double> @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v9
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32(
@@ -688,9 +677,8 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32(
 define <vscale x 1 x double> @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32(
@@ -712,9 +700,8 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32(
 define <vscale x 2 x double> @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32(
@@ -736,9 +723,8 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32(
 define <vscale x 2 x double> @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32(
@@ -760,9 +746,8 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32(
 define <vscale x 4 x double> @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32(
@@ -784,9 +769,8 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32(
 define <vscale x 4 x double> @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32(
@@ -808,9 +792,8 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32(
 define <vscale x 8 x double> @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v16
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32(
@@ -832,9 +815,8 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32(
 define <vscale x 8 x double> @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vfwmacc.vf v8, fa0, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32(