Diffstat (limited to 'llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll')
-rw-r--r--	llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll	146
1 file changed, 49 insertions(+), 97 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
index 35e687c4fcbc..67dbb5a92dfa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN:   -target-abi=ilp32d < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -562,9 +562,8 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
@@ -586,9 +585,8 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
@@ -610,9 +608,8 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
@@ -634,9 +631,8 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
@@ -658,9 +654,8 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
@@ -682,9 +677,8 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
@@ -706,9 +700,8 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
@@ -730,9 +723,8 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
@@ -754,9 +746,8 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
@@ -778,9 +769,8 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
@@ -802,9 +792,8 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
@@ -826,9 +815,8 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
@@ -850,9 +838,8 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
@@ -874,9 +861,8 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
@@ -898,9 +884,8 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
@@ -922,9 +907,8 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
@@ -946,9 +930,8 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
@@ -970,9 +953,8 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12, v0.t
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
@@ -994,13 +976,8 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
 define <vscale x 1 x double> @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    fld ft0, 8(sp)
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
@@ -1022,13 +999,8 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
 define <vscale x 1 x double> @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    fld ft0, 8(sp)
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
@@ -1050,13 +1022,8 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
 define <vscale x 2 x double> @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    fld ft0, 8(sp)
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
@@ -1078,13 +1045,8 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
 define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    fld ft0, 8(sp)
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
@@ -1106,13 +1068,8 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
 define <vscale x 4 x double> @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    fld ft0, 8(sp)
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
@@ -1134,13 +1091,8 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
 define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    fld ft0, 8(sp)
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
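
The functional change above is confined to the RUN line: with -target-abi=ilp32d, scalar half/float/double arguments are passed in FP argument registers under the hard-float calling convention, so the scalar operand of vfnmsub.vf is already in fa0. That removes the fmv.h.x/fmv.w.x moves for the f16/f32 cases and, for f64 on rv32 (where the default ilp32 ABI splits a double across a0/a1), the sw/sw/fld round-trip through the stack. A minimal standalone sketch of the same effect follows; the function name @example_vfnmsub_vf and its CHECK lines are illustrative, not autogenerated from an actual llc run:

; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
; RUN:   -target-abi=ilp32d < %s | FileCheck %s

declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  <vscale x 1 x double>,
  i32)

; With ilp32d, %1 arrives directly in fa0 and the vector length %3 in a0,
; so no integer-to-FP move or stack spill is needed before vfnmsub.vf.
define <vscale x 1 x double> @example_vfnmsub_vf(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: example_vfnmsub_vf:
; CHECK:         vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    <vscale x 1 x double> %2,
    i32 %3)
  ret <vscale x 1 x double> %a
}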